blob: 1c63542dcb7a522b1d66dbd9677995905e5ba1ce [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
telsoa01c577f2c2018-08-31 09:22:23 +01005
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01006#include "LstmTestImpl.hpp"
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00007
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01008#include <QuantizeHelper.hpp>
9
Matthew Sloyan171214c2020-09-09 09:07:37 +010010#include <armnn/utility/NumericCast.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010011
James Conroy1f58f032021-04-27 17:13:27 +010012#include <backendsCommon/TensorHandle.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010013
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010014#include <backendsCommon/test/TensorCopyUtils.hpp>
15#include <backendsCommon/test/WorkloadTestUtils.hpp>
16
17#include <reference/workloads/Decoders.hpp>
18#include <reference/workloads/Encoders.hpp>
19#include <reference/workloads/LstmUtils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010020
David Beckac42efd2018-09-26 17:41:13 +010021#include <test/TensorHelpers.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010022
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010023#include <boost/multi_array.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010024
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010025namespace
26{
Jan Eilers38e05bd2019-06-26 13:10:09 +010027
28template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
29void LstmUtilsVectorBatchVectorAddTestImpl(
30 boost::multi_array<float, 1>& vec,
31 boost::multi_array<float, 2>& batchVec,
32 uint32_t vSize,
33 uint32_t nBatch,
34 boost::multi_array<float, 2>& expectedOutput )
35{
36 float qScale = 0.0f;
37 int32_t qOffset = 0;
38 armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset );
39
40 // Make encoder and decoder
41 std::unique_ptr<armnn::Decoder<float>> vecDecoder = armnn::MakeDecoder<float>(tensorInfo, vec.data());
42 std::unique_ptr<armnn::Decoder<float>> batchVecDecoder = armnn::MakeDecoder<float>(tensorInfo, batchVec.data());
43 std::unique_ptr<armnn::Encoder<float>> batchVecEncoder = armnn::MakeEncoder<float>(tensorInfo, batchVec.data());
44
45 VectorBatchVectorAdd(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
46
47 // check shape and compare values
Colm Donelan25ab3a82021-05-17 13:01:52 +010048 auto result = CompareTensors(batchVec, expectedOutput);
49 BOOST_TEST(result.m_Result, result.m_Message.str());
Jan Eilers38e05bd2019-06-26 13:10:09 +010050
51 // check if iterator is back at start position
52 batchVecEncoder->Set(1.0f);
53 BOOST_TEST(batchVec[0][0] == 1.0f);
54}
55
56template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
57void LstmUtilsZeroVectorTestImpl(
58 boost::multi_array<float, 1>& input,
59 uint32_t vSize,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010060 boost::multi_array<float, 1>& expectedOutput)
61{
Jan Eilers38e05bd2019-06-26 13:10:09 +010062 float qScale = 0.0f;
63 int32_t qOffset = 0;
64
65 armnn::TensorInfo tensorInfo({vSize}, ArmnnType, qScale, qOffset );
66
67 // Make encoder for input
68 std::unique_ptr<armnn::Encoder<float>> outputEncoder = armnn::MakeEncoder<float>(tensorInfo, input.data());
69
70 // call ZeroVector
71 ZeroVector(*outputEncoder, vSize);
72
73 // check shape and compare values
Colm Donelan25ab3a82021-05-17 13:01:52 +010074 auto result = CompareTensors(input, expectedOutput);
75 BOOST_TEST(result.m_Result, result.m_Message.str());
Jan Eilers38e05bd2019-06-26 13:10:09 +010076
77 // check if iterator is back at start position
78 outputEncoder->Set(1.0f);
79 BOOST_TEST(input[0] == 1.0f);
80
81}
82
Jan Eilers38e05bd2019-06-26 13:10:09 +010083template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
84void LstmUtilsMeanStddevNormalizationTestImpl(
85 boost::multi_array<float, 2>& input,
86 uint32_t vSize,
87 uint32_t nBatch,
88 boost::multi_array<float, 2>& expectedOutput)
89{
90 float qScale = 0.0f;
91 int32_t qOffset = 0;
92 armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset );
93
94 // Make encoder and decoder for input
95 std::unique_ptr<armnn::Decoder<float>> inputDecoder = armnn::MakeDecoder<float>(tensorInfo, input.data());
96 std::unique_ptr<armnn::Encoder<float>> outputEncoder = armnn::MakeEncoder<float>(tensorInfo, input.data());
97
98 MeanStddevNormalization(*inputDecoder, *outputEncoder, vSize, nBatch, 1e-8f);
99
100 // check shape and compare values
Colm Donelan25ab3a82021-05-17 13:01:52 +0100101 auto result = CompareTensors(input, expectedOutput);
102 BOOST_TEST(result.m_Result, result.m_Message.str());
Jan Eilers38e05bd2019-06-26 13:10:09 +0100103
104 // check if iterator is back at start position
105 outputEncoder->Set(1.0f);
106 BOOST_TEST(input[0][0] == 1.0f);
107}
108
109template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
110void LstmUtilsVectorBatchVectorCwiseProductTestImpl(
111 boost::multi_array<float, 1>& vec,
112 boost::multi_array<float, 2>& batchVec,
113 uint32_t vSize,
114 uint32_t nBatch,
115 boost::multi_array<float, 2>& expectedOutput)
116{
117 float qScale = 0.0f;
118 int32_t qOffset = 0;
119 armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset );
120
121 // Make encoder and decoder
122 std::unique_ptr<armnn::Decoder<float>> vecDecoder = armnn::MakeDecoder<float>(tensorInfo, vec.data());
123 std::unique_ptr<armnn::Decoder<float>> batchVecDecoder = armnn::MakeDecoder<float>(tensorInfo, batchVec.data());
124 std::unique_ptr<armnn::Encoder<float>> batchVecEncoder = armnn::MakeEncoder<float>(tensorInfo, batchVec.data());
125
126 VectorBatchVectorCwiseProduct(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
127
128 // check shape and compare values
Colm Donelan25ab3a82021-05-17 13:01:52 +0100129 auto result = CompareTensors(batchVec, expectedOutput);
130 BOOST_TEST(result.m_Result, result.m_Message.str());
Jan Eilers38e05bd2019-06-26 13:10:09 +0100131
132 // check if iterator is back at start position
133 batchVecEncoder->Set(1.0f);
134 BOOST_TEST(batchVec[0][0] == 1.0f);
135}
136
137// Lstm Layer tests:
James Conroy9c3cae82019-08-01 16:01:48 +0100138// *********************************** //
/// Builds and executes a single LSTM workload with CIFG, peephole and
/// projection all disabled, using a fixed set of hard-coded weights/biases,
/// and returns the actual vs. expected output for comparison by the caller.
///
/// @param workloadFactory     Factory used to create the Lstm workload.
/// @param memoryManager       Unused here (kept for a uniform test-impl signature).
/// @param tensorHandleFactory Factory used to create every tensor handle.
/// @param input               Input tensor, shape [batchSize, inputSize].
/// @param outputExpected      Expected output, shape [batchSize, outputSize].
/// @param qScale / qOffset    Quantisation parameters applied to all
///                            input/output tensor infos (0.0f / 0 for float).
/// @param constantDataType    Data type of the constant weight/bias tensors.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2>
LstmNoCifgNoPeepholeNoProjectionTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const boost::multi_array<T, 2>& input,
    const boost::multi_array<T, 2>& outputExpected,
    float qScale = 0.0f,
    int32_t qOffset = 0,
    armnn::DataType constantDataType = armnn::DataType::Float32)
{
    IgnoreUnused(memoryManager);
    // Derive all dimensions from the supplied input/expected tensors.
    unsigned int batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
    unsigned int inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
    unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);
    // cellSize and outputSize have the same size when there is no projection.
    unsigned numUnits = outputSize;

    // Workload input tensor infos.
    armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, ArmnnType, qScale, qOffset );
    armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, ArmnnType, qScale, qOffset);

    // Workload output tensor infos; without CIFG the scratch buffer holds
    // four gate buffers, hence numUnits * 4.
    armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> ret(outputTensorInfo);

    // Copy the caller's data into host-side tensors; all state/scratch
    // tensors start zero-initialised (T()).
    std::vector<T> inputVector;
    inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
    auto inputTensor = MakeTensor<T,2>(inputTensorInfo, inputVector);

    std::vector<T> cellStateInVector(batchSize * numUnits, T());
    auto cellStateInTensor = MakeTensor<T,2>(cellStateInTensorInfo, cellStateInVector);

    std::vector<T> outputStateInVector(batchSize * outputSize, T());
    auto outputStateInTensor = MakeTensor<T,2>(outputStateInTensorInfo, outputStateInVector);

    std::vector<T> scratchBufferVector(batchSize * numUnits * 4, T());
    auto scratchBufferTensor = MakeTensor<T,2>(scratchBufferTensorInfo, scratchBufferVector);

    std::vector<T> outputStateOutVector(batchSize * outputSize, T());
    auto outputStateOutTensor = MakeTensor<T,2>(outputStateOutTensorInfo, outputStateOutVector);

    std::vector<T> cellStateOutVector(batchSize * numUnits, T());
    auto cellStateOutTensor = MakeTensor<T,2>(cellStateOutTensorInfo, cellStateOutVector);

    std::vector<T> outputVector;
    outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
    ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, outputVector);

    // Device-side handles for every workload input and output.
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
            tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
            tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> scratchHandle =
            tensorHandleFactory.CreateTensorHandle(scratchBufferTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
            tensorHandleFactory.CreateTensorHandle(outputStateOutTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
            tensorHandleFactory.CreateTensorHandle(cellStateOutTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::LstmQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Input order: input, output-state-in, cell-state-in.
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());

    // Output order: scratch, output-state-out, cell-state-out, output.
    AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
    AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
    AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Constant-tensor infos; the names encode total element counts for this
    // fixed test shape (numUnits == 4, inputSize == 2): 4, 4x2 == 8, 4x4 == 16.
    armnn::TensorInfo tensorInfo4({numUnits}, constantDataType , qScale, qOffset);
    armnn::TensorInfo tensorInfo8({numUnits, 2}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo16({numUnits, 4}, constantDataType, qScale, qOffset);

    // Hard-coded weights and biases; the expected output passed in by the
    // caller must correspond to exactly these values.
    auto inputToInputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.45018822f, -0.02338299f, -0.0870589f,
                                                                  -0.34550029f, 0.04266912f, -0.15680569f,
                                                                  -0.34856534f, 0.43890524f});

    auto inputToForgetWeights = MakeTensor<float, 2>(tensorInfo8, {0.09701663f, 0.20334584f, -0.50592935f,
                                                                   -0.31343272f, -0.40032279f, 0.44781327f,
                                                                   0.01387155f, -0.35593212f});

    auto inputToCellWeights = MakeTensor<float, 2>(tensorInfo8, {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f,
                                                                 -0.20583314f, 0.44344562f, 0.22077113f,
                                                                 -0.29909778f});

    auto inputToOutputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.25065863f, -0.28290087f, 0.04613829f,
                                                                   0.40525138f, 0.44272184f, 0.03897077f,
                                                                   -0.1556896f, 0.19487578f});

    auto recurrentToInputWeights = MakeTensor<float, 2>(tensorInfo16, {-0.0063535f, -0.2042388f, 0.31454784f,
                                                                       -0.35746509f, 0.28902304f, 0.08183324f,
                                                                       -0.16555229f, 0.02286911f, -0.13566875f,
                                                                       0.03034258f, 0.48091322f, -0.12528998f,
                                                                       0.24077177f, -0.51332325f, -0.33502164f,
                                                                       0.10629296f});

    auto recurrentToForgetWeights = MakeTensor<float, 2>(tensorInfo16, {-0.48684245f, -0.06655136f, 0.42224967f,
                                                                        0.2112639f, 0.27654213f, 0.20864892f,
                                                                        -0.07646349f, 0.45877004f, 0.00141793f,
                                                                        -0.14609534f, 0.36447752f, 0.09196436f,
                                                                        0.28053468f, 0.01560611f, -0.20127171f,
                                                                        -0.01140004f});

    auto recurrentToCellWeights = MakeTensor<float, 2>(tensorInfo16, {-0.3407414f, 0.24443203f, -0.2078532f,
                                                                      0.26320225f, 0.05695659f, -0.00123841f,
                                                                      -0.4744786f, -0.35869038f, -0.06418842f,
                                                                      -0.13502428f, -0.501764f, 0.22830659f,
                                                                      -0.46367589f, 0.26016325f, -0.03894562f,
                                                                      -0.16368064f});

    auto recurrentToOutputWeights = MakeTensor<float, 2>(tensorInfo16, {0.43385774f, -0.17194885f, 0.2718237f,
                                                                        0.09215671f, 0.24107647f, -0.39835793f,
                                                                        0.18212086f, 0.01301402f, 0.48572797f,
                                                                        -0.50656658f, 0.20047462f, -0.20607421f,
                                                                        -0.51818722f, -0.15390486f, 0.0468148f,
                                                                        0.39922136f});

    auto cellToInputWeights = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});

    auto inputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});

    // Forget-gate bias of 1 biases the cell towards remembering at t == 0.
    auto forgetGateBias = MakeTensor<float, 1>(tensorInfo4, {1., 1., 1., 1.});

    auto cellBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});

    auto outputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});

    // Backing storage for every constant tensor the descriptor points at.
    armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo8);
    armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo8);
    armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo8);
    armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo8);
    armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);

    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);

    // NOTE(review): cellToInputWeightsTensor is allocated and filled above
    // but never attached to the descriptor (no m_CellToInputWeights
    // assignment) — consistent with peephole being disabled; confirm this
    // is intentional rather than a leftover.
    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

    // Flags to set test configuration
    // ActivationFunc uses the integer encoding of LstmDescriptor
    // (presumably 4 == tanh, matching the reference data — confirm against
    // the LstmDescriptor documentation).
    data.m_Parameters.m_ActivationFunc = 4;
    data.m_Parameters.m_CifgEnabled = false;
    data.m_Parameters.m_PeepholeEnabled = false;
    data.m_Parameters.m_ProjectionEnabled = false;

    // Create the workload, allocate all handles, upload inputs, run, and
    // read the result back into ret.output.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();

    scratchHandle->Allocate();
    outputStateOutHandle->Allocate();
    cellStateOutHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    return ret;
}
343
Conor Kennedyb9971c92019-05-07 07:14:23 +0100344template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
345LayerTestResult<T, 2>
Matteo Martincigha65b7ae2018-11-14 12:39:55 +0000346LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workloadFactory,
347 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williamsc43de6a2020-08-27 11:13:25 +0100348 const armnn::ITensorHandleFactory& tensorHandleFactory,
Conor Kennedyb9971c92019-05-07 07:14:23 +0100349 const boost::multi_array<T, 2>& input,
350 const boost::multi_array<T, 2>& outputExpected,
351 float qScale = 0.0f,
352 int32_t qOffset = 0,
353 armnn::DataType constantDataType = armnn::DataType::Float32)
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000354{
Jan Eilers8eb25602020-03-09 12:13:48 +0000355 IgnoreUnused(memoryManager);
telsoa01c577f2c2018-08-31 09:22:23 +0100356 unsigned int batchSize = 2;
357 unsigned int outputSize = 16;
358 unsigned int inputSize = 5;
359 unsigned numUnits = 20;
360
Conor Kennedyb9971c92019-05-07 07:14:23 +0100361 armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, ArmnnType, qScale, qOffset);
362 armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, ArmnnType, qScale, qOffset);
363 armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, ArmnnType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +0100364
Matteo Martincigha65b7ae2018-11-14 12:39:55 +0000365 // Scratch buffer size without CIFG [batchSize, numUnits * 4]
Conor Kennedyb9971c92019-05-07 07:14:23 +0100366 armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, ArmnnType, qScale, qOffset);
367 armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
368 armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
369 armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +0100370
Conor Kennedyb9971c92019-05-07 07:14:23 +0100371 LayerTestResult<T, 2> ret(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100372
Rob Hughesbb46dde2020-05-20 15:27:37 +0100373 std::vector<T> inputVector;
telsoa01c577f2c2018-08-31 09:22:23 +0100374 inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
Rob Hughesbb46dde2020-05-20 15:27:37 +0100375 auto inputTensor = MakeTensor<T,2>(inputTensorInfo, inputVector);
telsoa01c577f2c2018-08-31 09:22:23 +0100376
Rob Hughesbb46dde2020-05-20 15:27:37 +0100377 std::vector<T> cellStateInVector(batchSize * numUnits, T());
378 auto cellStateInTensor = MakeTensor<T,2>(cellStateInTensorInfo, cellStateInVector);
telsoa01c577f2c2018-08-31 09:22:23 +0100379
Rob Hughesbb46dde2020-05-20 15:27:37 +0100380 std::vector<T> outputStateInVector(batchSize * outputSize, T());
381 auto outputStateInTensor = MakeTensor<T,2>(outputStateInTensorInfo, outputStateInVector);
telsoa01c577f2c2018-08-31 09:22:23 +0100382
Rob Hughesbb46dde2020-05-20 15:27:37 +0100383 std::vector<T> scratchBufferVector(batchSize * numUnits * 4, T());
384 auto scratchBufferTensor = MakeTensor<T,2>(scratchBufferTensorInfo, scratchBufferVector);
telsoa01c577f2c2018-08-31 09:22:23 +0100385
Rob Hughesbb46dde2020-05-20 15:27:37 +0100386 std::vector<T> outputStateOutVector(batchSize * outputSize, T());
387 auto outputStateOutTensor = MakeTensor<T,2>(outputStateOutTensorInfo, outputStateOutVector);
telsoa01c577f2c2018-08-31 09:22:23 +0100388
Rob Hughesbb46dde2020-05-20 15:27:37 +0100389 std::vector<T> cellStateOutVector(batchSize * numUnits, T());
390 auto cellStateOutTensor = MakeTensor<T,2>(cellStateOutTensorInfo, cellStateOutVector);
telsoa01c577f2c2018-08-31 09:22:23 +0100391
Rob Hughesbb46dde2020-05-20 15:27:37 +0100392 std::vector<T> outputVector;
telsoa01c577f2c2018-08-31 09:22:23 +0100393 outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
Rob Hughesbb46dde2020-05-20 15:27:37 +0100394 ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, outputVector);
telsoa01c577f2c2018-08-31 09:22:23 +0100395
Finn Williamsc43de6a2020-08-27 11:13:25 +0100396 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100397 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
Finn Williamsc43de6a2020-08-27 11:13:25 +0100398 tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100399 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
Finn Williamsc43de6a2020-08-27 11:13:25 +0100400 tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100401
Finn Williamsc43de6a2020-08-27 11:13:25 +0100402 std::unique_ptr<armnn::ITensorHandle> scratchHandle =
403 tensorHandleFactory.CreateTensorHandle(scratchBufferTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100404 std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
Finn Williamsc43de6a2020-08-27 11:13:25 +0100405 tensorHandleFactory.CreateTensorHandle(outputStateOutTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100406 std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
Finn Williamsc43de6a2020-08-27 11:13:25 +0100407 tensorHandleFactory.CreateTensorHandle(cellStateOutTensorInfo);
408 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100409
410 armnn::LstmQueueDescriptor data;
411 armnn::WorkloadInfo info;
412
413 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
414 AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
415 AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
David Beckac42efd2018-09-26 17:41:13 +0100416
telsoa01c577f2c2018-08-31 09:22:23 +0100417 AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
418 AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
419 AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
420 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
421
Conor Kennedyb9971c92019-05-07 07:14:23 +0100422 armnn::TensorInfo tensorInfo16({outputSize}, constantDataType, qScale, qOffset);
423 armnn::TensorInfo tensorInfo20({numUnits}, constantDataType, qScale, qOffset);
424 armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, constantDataType, qScale, qOffset);
425 armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, constantDataType, qScale, qOffset);
426 armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, constantDataType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +0100427
428 auto inputToInputWeights =
429 MakeTensor<float, 2>(tensorInfo20x5, {0.021393683f,0.06124551f, 0.046905167f,-0.014657677f,-0.03149463f,
430 0.09171803f, 0.14647801f,0.10797193f, -0.0057968358f,0.0019193048f,
431 -0.2726754f, 0.10154029f, -0.018539885f, 0.080349885f, -0.10262385f,
432 -0.022599787f,-0.09121155f, -0.008675967f, -0.045206103f,-0.0821282f,
433 -0.008045952f,0.015478081f, 0.055217247f, 0.038719587f, 0.044153627f,
434 -0.06453243f,0.05031825f, -0.046935108f, -0.008164439f, 0.014574226f,
435 -0.1671009f, -0.15519552f, -0.16819797f,-0.13971269f,-0.11953059f,
436 0.25005487f, -0.22790983f, 0.009855087f, -0.028140958f, -0.11200698f,
437 0.11295408f, -0.0035217577f, 0.054485075f, 0.05184695f, 0.064711206f,
438 0.10989193f, 0.11674786f, 0.03490607f, 0.07727357f, 0.11390585f,
439 -0.1863375f, -0.1034451f, -0.13945189f, -0.049401227f, -0.18767063f,
440 0.042483903f, 0.14233552f, 0.13832581f, 0.18350165f, 0.14545603f,
441 -0.028545704f,0.024939531f,0.050929718f,0.0076203286f,-0.0029723682f,
442 -0.042484224f, -0.11827596f, -0.09171104f, -0.10808628f,-0.16327988f,
443 -0.2273378f, -0.0993647f, -0.017155107f,0.0023917493f,0.049272764f,
444 0.0038534778f, 0.054764505f, 0.089753784f, 0.06947234f, 0.08014476f,
445 -0.04544234f, -0.0497073f,-0.07135631f, -0.048929106f,-0.004042012f,
446 -0.009284026f, 0.018042054f, 0.0036860977f,-0.07427302f, -0.11434604f,
447 -0.018995456f, 0.031487543f, 0.012834908f,0.019977754f,0.044256654f,
448 -0.39292613f, -0.18519334f, -0.11651281f,-0.06809892f, 0.011373677f
449 });
450
451 auto inputToForgetWeights =
452 MakeTensor<float, 2>(tensorInfo20x5, {-0.0018401089f, -0.004852237f,0.03698424f, 0.014181704f,0.028273236f,
453 -0.016726194f, -0.05249759f,-0.10204261f, 0.00861066f,-0.040979505f,
454 -0.009899187f,0.01923892f,-0.028177269f, -0.08535103f,-0.14585495f,
455 0.10662567f,-0.01909731f,-0.017883534f,-0.0047269356f,-0.045103323f,
456 0.0030784295f,0.076784775f,0.07463696f, 0.094531395f,0.0814421f,
457 -0.12257899f, -0.033945758f,-0.031303465f, 0.045630626f,0.06843887f,
458 -0.13492945f, -0.012480007f,-0.0811829f, -0.07224499f,-0.09628791f,
459 0.045100946f,0.0012300825f, 0.013964662f, 0.099372394f,0.02543059f,
460 0.06958324f, 0.034257296f, 0.0482646f, 0.06267997f,0.052625068f,
461 0.12784666f, 0.07077897f, 0.025725935f, 0.04165009f,0.07241905f,
462 0.018668644f, -0.037377294f,-0.06277783f,-0.08833636f,-0.040120605f,
463 -0.011405586f,-0.007808335f,-0.010301386f,-0.005102167f,0.027717464f,
464 0.05483423f, 0.11449111f, 0.11289652f,0.10939839f, 0.13396506f,
465 -0.08402166f,-0.01901462f, -0.044678304f,-0.07720565f,0.014350063f,
466 -0.11757958f, -0.0652038f, -0.08185733f,-0.076754324f,-0.092614375f,
467 0.10405491f, 0.052960336f, 0.035755895f,0.035839386f,-0.012540553f,
468 0.036881298f, 0.02913376f, 0.03420159f,0.05448447f,-0.054523353f,
469 0.02582715f, 0.02327355f, -0.011857179f,-0.0011980024f,-0.034641717f,
470 -0.026125094f,-0.17582615f,-0.15923657f,-0.27486774f,-0.0006143371f,
471 0.0001771948f, -8.470171e-05f, 0.02651807f,0.045790765f,0.06956496f
472 });
473
474 auto inputToCellWeights =
475 MakeTensor<float, 2>(tensorInfo20x5, {-0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f,
476 -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f,
477 -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f,
478 -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f,
479 -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f,
480 0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f,
481 -0.13002433f, -0.036816437f, -0.02130134f, -0.016518239f,
482 0.0047691227f, -0.0025825808f, 0.066017866f, 0.029991534f,
483 -0.10652836f, -0.1037554f, -0.13056071f, -0.03266643f,
484 -0.033702414f, -0.006473424f, -0.04611692f, 0.014419339f,
485 -0.025174323f, 0.0396852f, 0.081777506f, 0.06157468f,
486 0.10210095f, -0.009658194f, 0.046511717f, 0.03603906f,
487 0.0069369148f, 0.015960095f, -0.06507666f, 0.09551598f,
488 0.053568836f, 0.06408714f, 0.12835667f, -0.008714329f,
489 -0.20211966f, -0.12093674f, 0.029450472f, 0.2849013f,
490 -0.029227901f, 0.1164364f, -0.08560263f, 0.09941786f,
491 -0.036999565f, -0.028842626f, -0.0033637602f, -0.017012902f,
492 -0.09720865f, -0.11193351f, -0.029155117f, -0.017936034f,
493 -0.009768936f, -0.04223324f, -0.036159635f, 0.06505112f,
494 -0.021742892f, -0.023377212f, -0.07221364f, -0.06430552f,
495 0.05453865f, 0.091149814f, 0.06387331f, 0.007518393f,
496 0.055960953f, 0.069779344f, 0.046411168f, 0.10509911f,
497 0.07463894f, 0.0075130584f, 0.012850982f, 0.04555431f,
498 0.056955688f, 0.06555285f, 0.050801456f, -0.009862683f,
499 0.00826772f, -0.026555609f, -0.0073611983f, -0.0014897042f
500 });
501
502 auto inputToOutputWeights =
503 MakeTensor<float, 2>(tensorInfo20x5, {-0.0998932f, -0.07201956f, -0.052803773f,-0.15629593f,-0.15001918f,
504 -0.07650751f,0.02359855f, -0.075155355f, -0.08037709f, -0.15093534f,
505 0.029517552f, -0.04751393f, 0.010350531f,-0.02664851f, -0.016839722f,
506 -0.023121163f, 0.0077019283f, 0.012851257f, -0.05040649f,-0.0129761f,
507 -0.021737747f,-0.038305793f,-0.06870586f, -0.01481247f,-0.001285394f,
508 0.10124236f, 0.083122835f, 0.053313006f,-0.062235646f,-0.075637154f,
509 -0.027833903f, 0.029774971f, 0.1130802f, 0.09218906f, 0.09506135f,
510 -0.086665764f,-0.037162706f,-0.038880914f,-0.035832845f,-0.014481564f,
511 -0.09825003f,-0.12048569f,-0.097665586f,-0.05287633f, -0.0964047f,
512 -0.11366429f, 0.035777505f, 0.13568819f, 0.052451383f,0.050649304f,
513 0.05798951f, -0.021852335f,-0.099848844f,0.014740475f,-0.078897946f,
514 0.04974699f, 0.014160473f, 0.06973932f, 0.04964942f, 0.033364646f,
515 0.08190124f, 0.025535367f, 0.050893165f, 0.048514254f,0.06945813f,
516 -0.078907564f,-0.06707616f, -0.11844508f, -0.09986688f,-0.07509403f,
517 0.06263226f, 0.14925587f, 0.20188436f, 0.12098451f,0.14639415f,
518 0.0015017595f, -0.014267382f, -0.03417257f,0.012711468f,0.0028300495f,
519 -0.024758482f, -0.05098548f,-0.0821182f, 0.014225672f, 0.021544158f,
520 0.08949725f, 0.07505268f, -0.0020780868f, 0.04908258f,0.06476295f,
521 -0.022907063f,0.027562456f,0.040185735f, 0.019567577f,-0.015598739f,
522 -0.049097303f, -0.017121866f, -0.083368234f,-0.02332002f,-0.0840956f
523 });
524
525 auto inputGateBias =
526 MakeTensor<float, 1>(tensorInfo20, {0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
527 -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
528 -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
529 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f
530 });
531
532 auto forgetGateBias =
533 MakeTensor<float, 1>(tensorInfo20, {0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f,
534 0.11098921f, 0.15378423f, 0.09263801f, 0.09790885f,
535 0.09508917f, 0.061199076f, 0.07665568f, -0.015443159f,
536 -0.03499149f, 0.046190713f, 0.08895977f, 0.10899629f,
537 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f
538 });
539
540 auto cellBias =
541 MakeTensor<float, 1>(tensorInfo20, {-0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f,
542 -0.1483596f, -0.10639995f, -0.091433935f, 0.058573797f,
543 -0.06809782f, -0.07889636f, -0.043246906f, -0.09829136f,
544 -0.4279842f, 0.034901652f, 0.18797937f, 0.0075234566f,
545 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f
546 });
547
548 auto outputGateBias =
549 MakeTensor<float, 1>(tensorInfo20, {0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
550 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
551 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
552 -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f
553 });
554
555 auto recurrentToInputWeights =
556 MakeTensor<float, 2>(tensorInfo20x16, {-0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f,
557 -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
558 -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
559 -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
560 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f,
561 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
562 -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
563 0.14283475f, -0.07390571f, -0.06402044f, 0.062524505f,
564 -0.093129106f, 0.04860203f, -0.08364217f, -0.08119002f,
565 0.009352075f, 0.22920375f, 0.0016303885f, 0.11583097f,
566 -0.13732095f, 0.012405723f, -0.07551853f, 0.06343048f,
567 0.12162708f, -0.031923793f, -0.014335606f, 0.01790974f,
568 -0.10650317f, -0.0724401f, 0.08554849f, -0.05727212f,
569 0.06556731f, -0.042729504f, -0.043227166f, 0.011683251f,
570 -0.013082158f, -0.029302018f, -0.010899579f, -0.062036745f,
571 -0.022509435f, -0.00964907f, -0.01567329f, 0.04260106f,
572 -0.07787477f, -0.11576462f, 0.017356863f, 0.048673786f,
573 -0.017577527f, -0.05527947f, -0.082487635f, -0.040137455f,
574 -0.10820036f, -0.04666372f, 0.022746278f, -0.07851417f,
575 0.01068115f, 0.032956902f, 0.022433773f, 0.0026891115f,
576 0.08944216f, -0.0685835f, 0.010513544f, 0.07228705f,
577 0.02032331f, -0.059686817f, -0.0005566496f, -0.086984694f,
578 0.040414046f, -0.1380399f, 0.094208956f, -0.05722982f,
579 0.012092817f, -0.04989123f, -0.086576f, -0.003399834f,
580 -0.04696032f, -0.045747425f, 0.10091314f, 0.048676282f,
581 -0.029037097f, 0.031399418f, -0.0040285117f, 0.047237843f,
582 0.09504992f, 0.041799378f, -0.049185462f, -0.031518843f,
583 -0.10516937f, 0.026374253f, 0.10058866f, -0.0033195973f,
584 -0.041975245f, 0.0073591834f, 0.0033782164f, -0.004325073f,
585 -0.10167381f, 0.042500053f, -0.01447153f, 0.06464186f,
586 -0.017142897f, 0.03312627f, 0.009205989f, 0.024138335f,
587 -0.011337001f, 0.035530265f, -0.010912711f, 0.0706555f,
588 -0.005894094f, 0.051841937f, -0.1401738f, -0.02351249f,
589 0.0365468f, 0.07590991f, 0.08838724f, 0.021681072f,
590 -0.10086113f, 0.019608743f, -0.06195883f, 0.077335775f,
591 0.023646897f, -0.095322326f, 0.02233014f, 0.09756986f,
592 -0.048691444f, -0.009579111f, 0.07595467f, 0.11480546f,
593 -0.09801813f, 0.019894179f, 0.08502348f, 0.004032281f,
594 0.037211012f, 0.068537936f, -0.048005626f, -0.091520436f,
595 -0.028379958f, -0.01556313f, 0.06554592f, -0.045599163f,
596 -0.01672207f, -0.020169014f, -0.011877351f, -0.20212261f,
597 0.010889619f, 0.0047078193f, 0.038385306f, 0.08540671f,
598 -0.017140968f, -0.0035865551f, 0.016678626f, 0.005633034f,
599 0.015963363f, 0.00871737f, 0.060130805f, 0.028611384f,
600 0.10109069f, -0.015060172f, -0.07894427f, 0.06401885f,
601 0.011584063f, -0.024466386f, 0.0047652307f, -0.09041358f,
602 0.030737216f, -0.0046374933f, 0.14215417f, -0.11823516f,
603 0.019899689f, 0.006106124f, -0.027092824f, 0.0786356f,
604 0.05052217f, -0.058925f, -0.011402121f, -0.024987547f,
605 -0.0013661642f, -0.06832946f, -0.015667673f, -0.1083353f,
606 -0.00096863037f, -0.06988685f, -0.053350925f, -0.027275559f,
607 -0.033664223f, -0.07978348f, -0.025200296f, -0.017207067f,
608 -0.058403496f, -0.055697463f, 0.005798788f, 0.12965427f,
609 -0.062582195f, 0.0013350133f, -0.10482091f, 0.0379771f,
610 0.072521195f, -0.0029455067f, -0.13797039f, -0.03628521f,
611 0.013806405f, -0.017858358f, -0.01008298f, -0.07700066f,
612 -0.017081132f, 0.019358726f, 0.0027079724f, 0.004635139f,
613 0.062634714f, -0.02338735f, -0.039547626f, -0.02050681f,
614 0.03385117f, -0.083611414f, 0.002862572f, -0.09421313f,
615 0.058618143f, -0.08598433f, 0.00972939f, 0.023867095f,
616 -0.053934585f, -0.023203006f, 0.07452513f, -0.048767887f,
617 -0.07314807f, -0.056307215f, -0.10433547f, -0.06440842f,
618 0.04328182f, 0.04389765f, -0.020006588f, -0.09076438f,
619 -0.11652589f, -0.021705797f, 0.03345259f, -0.010329105f,
620 -0.025767034f, 0.013057034f, -0.07316461f, -0.10145612f,
621 0.06358255f, 0.18531723f, 0.07759293f, 0.12006465f,
622 0.1305557f, 0.058638252f, -0.03393652f, 0.09622831f,
623 -0.16253184f, -2.4580743e-06f, 0.079869635f, -0.070196845f,
624 -0.005644518f, 0.06857898f, -0.12598175f, -0.035084512f,
625 0.03156317f, -0.12794146f, -0.031963028f, 0.04692781f,
626 0.030070418f, 0.0071660685f, -0.095516115f, -0.004643372f,
627 0.040170413f, -0.062104587f, -0.0037324072f, 0.0554317f,
628 0.08184801f, -0.019164372f, 0.06791302f, 0.034257166f,
629 -0.10307039f, 0.021943003f, 0.046745934f, 0.0790918f,
630 -0.0265588f, -0.007824208f, 0.042546265f, -0.00977924f,
631 -0.0002440307f, -0.017384544f, -0.017990116f, 0.12252321f,
632 -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f,
633 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f,
634 -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f,
635 -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f
636 });
637
638 auto recurrentToForgetWeights =
639 MakeTensor<float, 2>(tensorInfo20x16, {-0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f,
640 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
641 -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
642 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
643 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f,
644 -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
645 -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
646 0.061878487f, -0.04729229f, 0.034919553f, -0.07585433f,
647 -0.04421272f, -0.044019096f, 0.085488975f, 0.04058006f,
648 -0.06890133f, -0.030951202f, -0.024628663f, -0.07672815f,
649 0.034293607f, 0.08556707f, -0.05293577f, -0.033561368f,
650 -0.04899627f, 0.0241671f, 0.015736353f, -0.095442444f,
651 -0.029564252f, 0.016493602f, -0.035026584f, 0.022337519f,
652 -0.026871363f, 0.004780428f, 0.0077918363f, -0.03601621f,
653 0.016435321f, -0.03263031f, -0.09543275f, -0.047392778f,
654 0.013454138f, 0.028934088f, 0.01685226f, -0.086110644f,
655 -0.046250615f, -0.01847454f, 0.047608484f, 0.07339695f,
656 0.034546845f, -0.04881143f, 0.009128804f, -0.08802852f,
657 0.03761666f, 0.008096139f, -0.014454086f, 0.014361001f,
658 -0.023502491f, -0.0011840804f, -0.07607001f, 0.001856849f,
659 -0.06509276f, -0.006021153f, -0.08570962f, -0.1451793f,
660 0.060212336f, 0.055259194f, 0.06974018f, 0.049454916f,
661 -0.027794661f, -0.08077226f, -0.016179763f, 0.1169753f,
662 0.17213494f, -0.0056326236f, -0.053934924f, -0.0124349f,
663 -0.11520337f, 0.05409887f, 0.088759385f, 0.0019655675f,
664 0.0042065294f, 0.03881498f, 0.019844765f, 0.041858196f,
665 -0.05695512f, 0.047233116f, 0.038937137f, -0.06542224f,
666 0.014429736f, -0.09719407f, 0.13908425f, -0.05379757f,
667 0.012321099f, 0.082840554f, -0.029899208f, 0.044217527f,
668 0.059855383f, 0.07711018f, -0.045319796f, 0.0948846f,
669 -0.011724666f, -0.0033288454f, -0.033542685f, -0.04764985f,
670 -0.13873616f, 0.040668588f, 0.034832682f, -0.015319203f,
671 -0.018715994f, 0.046002675f, 0.0599172f, -0.043107376f,
672 0.0294216f, -0.002314414f, -0.022424703f, 0.0030315618f,
673 0.0014641669f, 0.0029166266f, -0.11878115f, 0.013738511f,
674 0.12375372f, -0.0006038222f, 0.029104086f, 0.087442465f,
675 0.052958444f, 0.07558703f, 0.04817258f, 0.044462286f,
676 -0.015213451f, -0.08783778f, -0.0561384f, -0.003008196f,
677 0.047060397f, -0.002058388f, 0.03429439f, -0.018839769f,
678 0.024734668f, 0.024614193f, -0.042046934f, 0.09597743f,
679 -0.0043254104f, 0.04320769f, 0.0064070094f, -0.0019131786f,
680 -0.02558259f, -0.022822596f, -0.023273505f, -0.02464396f,
681 -0.10991725f, -0.006240552f, 0.0074488563f, 0.024044557f,
682 0.04383914f, -0.046476185f, 0.028658995f, 0.060410924f,
683 0.050786525f, 0.009452605f, -0.0073054377f, -0.024810238f,
684 0.0052906186f, 0.0066939713f, -0.0020913032f, 0.014515517f,
685 0.015898481f, 0.021362653f, -0.030262267f, 0.016587038f,
686 -0.011442813f, 0.041154444f, -0.007631438f, -0.03423484f,
687 -0.010977775f, 0.036152758f, 0.0066366293f, 0.11915515f,
688 0.02318443f, -0.041350313f, 0.021485701f, -0.10906167f,
689 -0.028218046f, -0.00954771f, 0.020531068f, -0.11995105f,
690 -0.03672871f, 0.024019798f, 0.014255957f, -0.05221243f,
691 -0.00661567f, -0.04630967f, 0.033188973f, 0.10107534f,
692 -0.014027541f, 0.030796422f, -0.10270911f, -0.035999842f,
693 0.15443139f, 0.07684145f, 0.036571592f, -0.035900835f,
694 -0.0034699554f, 0.06209149f, 0.015920248f, -0.031122351f,
695 -0.03858649f, 0.01849943f, 0.13872518f, 0.01503974f,
696 0.069941424f, -0.06948533f, -0.0088794185f, 0.061282158f,
697 -0.047401894f, 0.03100163f, -0.041533746f, -0.10430945f,
698 0.044574402f, -0.01425562f, -0.024290353f, 0.034563623f,
699 0.05866852f, 0.023947537f, -0.09445152f, 0.035450947f,
700 0.02247216f, -0.0042998926f, 0.061146557f, -0.10250651f,
701 0.020881841f, -0.06747029f, 0.10062043f, -0.0023941975f,
702 0.03532124f, -0.016341697f, 0.09685456f, -0.016764693f,
703 0.051808182f, 0.05875331f, -0.04536488f, 0.001626336f,
704 -0.028892258f, -0.01048663f, -0.009793449f, -0.017093895f,
705 0.010987891f, 0.02357273f, -0.00010856845f, 0.0099760275f,
706 -0.001845119f, -0.03551521f, 0.0018358806f, 0.05763657f,
707 -0.01769146f, 0.040995963f, 0.02235177f, -0.060430344f,
708 0.11475477f, -0.023854522f, 0.10071741f, 0.0686208f,
709 -0.014250481f, 0.034261297f, 0.047418304f, 0.08562733f,
710 -0.030519066f, 0.0060542435f, 0.014653856f, -0.038836084f,
711 0.04096551f, 0.032249358f, -0.08355519f, -0.026823482f,
712 0.056386515f, -0.010401743f, -0.028396193f, 0.08507674f,
713 0.014410365f, 0.020995233f, 0.17040324f, 0.11511526f,
714 0.02459721f, 0.0066619175f, 0.025853224f, -0.023133837f,
715 -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f,
716 -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f,
717 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f,
718 -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f
719 });
720
721 auto recurrentToCellWeights =
722 MakeTensor<float, 2>(tensorInfo20x16, {-0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
723 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
724 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
725 -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
726 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
727 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
728 -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
729 -0.019443132f, -0.030755889f, -0.0040000007f, 0.04465846f,
730 -0.021585021f, 0.0031670958f, 0.0053199246f, -0.056117613f,
731 -0.10893326f, 0.076739706f, -0.08509834f, -0.027997585f,
732 0.037871376f, 0.01449768f, -0.09002357f, -0.06111149f,
733 -0.046195522f, 0.0422062f, -0.005683705f, -0.1253618f,
734 -0.012925729f, -0.04890792f, 0.06985068f, 0.037654128f,
735 0.03398274f, -0.004781977f, 0.007032333f, -0.031787455f,
736 0.010868644f, -0.031489216f, 0.09525667f, 0.013939797f,
737 0.0058680447f, 0.0167067f, 0.02668468f, -0.04797466f,
738 -0.048885044f, -0.12722108f, 0.035304096f, 0.06554885f,
739 0.00972396f, -0.039238118f, -0.05159735f, -0.11329045f,
740 0.1613692f, -0.03750952f, 0.06529313f, -0.071974665f,
741 -0.11769596f, 0.015524369f, -0.0013754242f, -0.12446318f,
742 0.02786344f, -0.014179351f, 0.005264273f, 0.14376344f,
743 0.015983658f, 0.03406988f, -0.06939408f, 0.040699873f,
744 0.02111075f, 0.09669095f, 0.041345075f, -0.08316494f,
745 -0.07684199f, -0.045768797f, 0.032298047f, -0.041805092f,
746 0.0119405f, 0.0061010392f, 0.12652606f, 0.0064572375f,
747 -0.024950314f, 0.11574242f, 0.04508852f, -0.04335324f,
748 0.06760663f, -0.027437469f, 0.07216407f, 0.06977076f,
749 -0.05438599f, 0.034033038f, -0.028602652f, 0.05346137f,
750 0.043184172f, -0.037189785f, 0.10420091f, 0.00882477f,
751 -0.054019816f, -0.074273005f, -0.030617684f, -0.0028467078f,
752 0.024302477f, -0.0038869337f, 0.005332455f, 0.0013399826f,
753 0.04361412f, -0.007001822f, 0.09631092f, -0.06702025f,
754 -0.042049985f, -0.035070654f, -0.04103342f, -0.10273396f,
755 0.0544271f, 0.037184782f, -0.13150354f, -0.0058036847f,
756 -0.008264958f, 0.042035464f, 0.05891794f, 0.029673764f,
757 0.0063542654f, 0.044788733f, 0.054816857f, 0.062257513f,
758 -0.00093483756f, 0.048938446f, -0.004952862f, -0.007730018f,
759 -0.04043371f, -0.017094059f, 0.07229206f, -0.023670016f,
760 -0.052195564f, -0.025616996f, -0.01520939f, 0.045104615f,
761 -0.007376126f, 0.003533447f, 0.006570588f, 0.056037236f,
762 0.12436656f, 0.051817212f, 0.028532185f, -0.08686856f,
763 0.11868599f, 0.07663395f, -0.07323171f, 0.03463402f,
764 -0.050708205f, -0.04458982f, -0.11590894f, 0.021273347f,
765 0.1251325f, -0.15313013f, -0.12224372f, 0.17228661f,
766 0.023029093f, 0.086124025f, 0.006445803f, -0.03496501f,
767 0.028332196f, 0.04449512f, -0.042436164f, -0.026587414f,
768 -0.006041347f, -0.09292539f, -0.05678812f, 0.03897832f,
769 0.09465633f, 0.008115513f, -0.02171956f, 0.08304309f,
770 0.071401566f, 0.019622514f, 0.032163795f, -0.004167056f,
771 0.02295182f, 0.030739572f, 0.056506045f, 0.004612461f,
772 0.06524936f, 0.059999723f, 0.046395954f, -0.0045512207f,
773 -0.1335546f, -0.030136576f, 0.11584653f, -0.014678886f,
774 0.0020118146f, -0.09688814f, -0.0790206f, 0.039770417f,
775 -0.0329582f, 0.07922767f, 0.029322514f, 0.026405897f,
776 0.04207835f, -0.07073373f, 0.063781224f, 0.0859677f,
777 -0.10925287f, -0.07011058f, 0.048005477f, 0.03438226f,
778 -0.09606514f, -0.006669445f, -0.043381985f, 0.04240257f,
779 -0.06955775f, -0.06769346f, 0.043903265f, -0.026784198f,
780 -0.017840602f, 0.024307009f, -0.040079936f, -0.019946516f,
781 0.045318738f, -0.12233574f, 0.026170589f, 0.0074471775f,
782 0.15978073f, 0.10185836f, 0.10298046f, -0.015476589f,
783 -0.039390966f, -0.072174534f, 0.0739445f, -0.1211869f,
784 -0.0347889f, -0.07943156f, 0.014809798f, -0.12412325f,
785 -0.0030663363f, 0.039695457f, 0.0647603f, -0.08291318f,
786 -0.018529687f, -0.004423833f, 0.0037507233f, 0.084633216f,
787 -0.01514876f, -0.056505352f, -0.012800942f, -0.06994386f,
788 0.012962922f, -0.031234352f, 0.07029052f, 0.016418684f,
789 0.03618972f, 0.055686004f, -0.08663945f, -0.017404709f,
790 -0.054761406f, 0.029065743f, 0.052404847f, 0.020238016f,
791 0.0048197987f, -0.0214882f, 0.07078733f, 0.013016777f,
792 0.06262858f, 0.009184685f, 0.020785125f, -0.043904778f,
793 -0.0270329f, -0.03299152f, -0.060088247f, -0.015162964f,
794 -0.001828936f, 0.12642565f, -0.056757294f, 0.013586685f,
795 0.09232601f, -0.035886683f, 0.06000002f, 0.05229691f,
796 -0.052580316f, -0.082029596f, -0.010794592f, 0.012947712f,
797 -0.036429964f, -0.085508935f, -0.13127148f, -0.017744139f,
798 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f,
799 -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f,
800 -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f,
801 -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f
802 });
803
804 auto recurrentToOutputWeights =
805 MakeTensor<float, 2>(tensorInfo20x16, {0.025825322f, -0.05813119f, 0.09495884f,-0.045984812f, -0.01255415f,
806 -0.0026479573f,-0.08196161f,-0.054914974f,-0.0046604523f,
807 -0.029587349f, -0.044576716f, -0.07480124f, -0.082868785f,
808 0.023254942f, 0.027502948f, -0.0039728214f, -0.08683098f,
809 -0.08116779f, -0.014675607f, -0.037924774f, -0.023314456f,
810 -0.007401714f, -0.09255757f, 0.029460307f, -0.08829125f,
811 -0.005139627f, -0.08989442f, -0.0555066f, 0.13596267f,
812 -0.025062224f, -0.048351806f, -0.03850004f, 0.07266485f,
813 -0.022414139f, 0.05940088f, 0.075114764f, 0.09597592f,
814 -0.010211725f, -0.0049794707f, -0.011523867f, -0.025980417f,
815 0.072999895f, 0.11091378f, -0.081685916f, 0.014416728f,
816 0.043229222f, 0.034178585f, -0.07530371f, 0.035837382f,
817 -0.085607f, -0.007721233f, -0.03287832f, -0.043848954f,
818 -0.06404588f, -0.06632928f, -0.073643476f, 0.008214239f,
819 -0.045984086f, 0.039764922f, 0.03474462f, 0.060612556f,
820 -0.080590084f, 0.049127717f, 0.04151091f, -0.030063879f,
821 0.008801774f, -0.023021035f, -0.019558564f, 0.05158114f,
822 -0.010947698f, -0.011825728f, 0.0075720972f, 0.0699727f,
823 -0.0039981045f, 0.069350146f, 0.08799282f, 0.016156472f,
824 0.035502106f, 0.11695009f, 0.006217345f, 0.13392477f,
825 -0.037875112f, 0.025745004f, 0.08940699f, -0.00924166f,
826 0.0046702605f, -0.036598757f, -0.08811812f, 0.10522024f,
827 -0.032441203f, 0.008176899f, -0.04454919f, 0.07058152f,
828 0.0067963637f, 0.039206743f, 0.03259838f, 0.03725492f,
829 -0.09515802f, 0.013326398f, -0.052055415f, -0.025676316f,
830 0.03198509f, -0.015951829f, -0.058556724f, 0.036879618f,
831 0.043357447f, 0.028362012f, -0.05908629f, 0.0059240665f,
832 -0.04995891f, -0.019187413f,0.0276265f, -0.01628143f, 0.0025863599f,
833 0.08800015f, 0.035250366f, -0.022165963f, -0.07328642f,
834 -0.009415526f, -0.07455109f, 0.11690406f, 0.0363299f,
835 0.07411125f, 0.042103454f, -0.009660886f, 0.019076364f,
836 0.018299393f, -0.046004917f, 0.08891175f,0.0431396f, -0.026327137f,
837 -0.051502608f, 0.08979574f, -0.051670972f, 0.04940282f,
838 -0.07491107f, -0.021240504f, 0.022596184f, -0.034280192f,
839 0.060163025f, -0.058211457f, -0.051837247f, -0.01349775f,
840 -0.04639988f, -0.035936575f, -0.011681591f, 0.064818054f,
841 0.0073146066f, -0.021745546f, -0.043124277f, -0.06471268f,
842 -0.07053354f, -0.029321948f, -0.05330136f, 0.016933719f,
843 -0.053782392f, 0.13747959f, -0.1361751f, -0.11569455f,
844 0.0033329215f, 0.05693899f, -0.053219706f, 0.063698f,
845 0.07977434f, -0.07924483f, 0.06936997f, 0.0034815092f,
846 -0.007305279f, -0.037325785f, -0.07251102f, -0.033633437f,
847 -0.08677009f, 0.091591336f, -0.14165086f, 0.021752775f,
848 0.019683983f, 0.0011612234f, -0.058154266f, 0.049996935f,
849 0.0288841f, -0.0024567875f, -0.14345716f, 0.010955264f,-0.10234828f,
850 0.1183656f, -0.0010731248f, -0.023590032f,-0.072285876f,-0.0724771f,
851 -0.026382286f, -0.0014920527f, 0.042667855f, 0.0018776858f,
852 0.02986552f, 0.009814309f, 0.0733756f, 0.12289186f,
853 0.018043943f, -0.0458958f, 0.049412545f, 0.033632483f,
854 0.05495232f, 0.036686596f, -0.013781798f, -0.010036754f,
855 0.02576849f, -0.08307328f, 0.010112348f, 0.042521734f,
856 -0.05869831f, -0.071689695f, 0.03876447f, -0.13275425f, -0.0352966f,
857 -0.023077697f, 0.10285965f, 0.084736146f, 0.15568255f,
858 -0.00040734606f, 0.027835453f, -0.10292561f, -0.032401145f,
859 0.10053256f, -0.026142767f, -0.08271222f, -0.0030240538f,
860 -0.016368777f, 0.1070414f, 0.042672627f, 0.013456989f,
861 -0.0437609f, -0.022309763f, 0.11576483f, 0.04108048f,
862 0.061026827f, -0.0190714f, -0.0869359f, 0.037901703f, 0.0610107f,
863 0.07202949f, 0.01675338f, 0.086139716f, -0.08795751f,
864 -0.014898893f, -0.023771819f, -0.01965048f, 0.007955471f,
865 -0.043740474f, 0.03346837f, -0.10549954f, 0.090567775f,
866 0.042013682f, -0.03176985f, 0.12569028f, -0.02421228f,
867 -0.029526481f, 0.023851605f, 0.031539805f, 0.05292009f,
868 -0.02344001f, -0.07811758f, -0.08834428f, 0.10094801f,
869 0.16594367f, -0.06861939f, -0.021256343f, -0.041093912f,
870 -0.06669611f, 0.035498552f, 0.021757556f, -0.09302526f,
871 -0.015403468f, -0.06614931f, -0.051798206f, -0.013874718f,
872 0.03630673f, 0.010412845f, -0.08077351f, 0.046185967f,
873 0.0035662893f, 0.03541868f, -0.094149634f, -0.034814864f,
874 0.003128424f, -0.020674974f, -0.03944324f, -0.008110165f,
875 -0.11113267f, 0.08484226f, 0.043586485f, 0.040582247f,
876 0.0968012f, -0.065249965f, -0.028036479f, 0.0050708856f,
877 0.0017462453f, 0.0326779f, 0.041296225f, 0.09164146f,
878 -0.047743853f, -0.015952192f, -0.034451712f, 0.084197424f,
879 -0.05347844f, -0.11768019f, 0.085926116f, -0.08251791f,
880 -0.045081906f, 0.0948852f, 0.068401024f, 0.024856757f,
881 0.06978981f, -0.057309967f, -0.012775832f, -0.0032452994f,
882 0.01977615f, -0.041040014f, -0.024264973f,0.063464895f, 0.05431621f
883 });
884
885 auto cellToInputWeights =
886 MakeTensor<float, 1>(tensorInfo20, {0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
887 -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
888 -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f,-0.052169047f,
889 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f
890 });
891
892
893 auto cellToForgetWeights =
894 MakeTensor<float, 1>(tensorInfo20, {-0.01998659f,-0.15568835f,-0.24248174f, -0.012770197f, 0.041331276f,
895 -0.072311886f, -0.052123554f,-0.0066330447f,-0.043891653f,0.036225766f,
896 -0.047248036f, 0.021479502f,0.033189066f, 0.11952997f, -0.020432774f,
897 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f
898 });
899
900 auto cellToOutputWeights =
901 MakeTensor<float, 1>(tensorInfo20, {0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
902 -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
903 -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
904 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f
905 });
906
907 auto projectionWeights =
908 MakeTensor<float, 2>(tensorInfo16x20,
909 {-0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
910 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
911 -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
912 -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
913 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
914 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
915 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
916 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
917 -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
918 -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
919 -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
920 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
921 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
922 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
923 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
924 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
925 -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
926 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
927 -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
928 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
929 -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
930 -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
931 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
932 -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
933 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
934 -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
935 -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
936 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
937 -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
938 -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
939 -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
940 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
941 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
942 -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
943 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
944 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
945 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
946 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
947 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
948 -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
949 -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
950 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
951 -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
952 -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
953 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
954 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
955 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
956 -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
957 -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
958 -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
959 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
960 -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
961 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
962 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
963 -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
964 -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
965 -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
966 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
967 -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
968 -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
969 -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
970 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
971 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
972 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f
973 });
974
975 std::vector<float> projectionBiasVector(outputSize, 0.f);
976 auto projectionBias = MakeTensor<float,1>(tensorInfo16, projectionBiasVector);
977
James Conroy1f58f032021-04-27 17:13:27 +0100978 armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo20x5);
979 armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo20x5);
980 armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo20x5);
981 armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo20x5);
982 armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo20x16);
983 armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo20x16);
984 armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo20x16);
985 armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo20x16);
986 armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo20);
987 armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo20);
988 armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo20);
989 armnn::ScopedTensorHandle cellBiasTensor(tensorInfo20);
990 armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo20);
991 armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo20);
992 armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo20);
993 armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo16x20);
994 armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo16);
telsoa01c577f2c2018-08-31 09:22:23 +0100995
996 AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
997 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
998 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
999 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
1000 AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
1001 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
1002 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
1003 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
1004 AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
1005 AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
1006 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
1007 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
1008 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
1009 AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
1010 AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
1011 AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
1012 AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]);
1013
1014 data.m_InputToInputWeights = &inputToInputWeightsTensor;
1015 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1016 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1017 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1018 data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1019 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1020 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1021 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1022 data.m_CellToInputWeights = &cellToInputWeightsTensor;
1023 data.m_InputGateBias = &inputGateBiasTensor;
1024 data.m_ForgetGateBias = &forgetGateBiasTensor;
1025 data.m_CellBias = &cellBiasTensor;
1026 data.m_OutputGateBias = &outputGateBiasTensor;
1027 data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1028 data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1029 data.m_ProjectionWeights = &projectionWeightsTensor;
1030 data.m_ProjectionBias = &projectionBiasTensor;
1031
1032 // Flags to set test configuration
1033 data.m_Parameters.m_ActivationFunc = 4;
1034 data.m_Parameters.m_CifgEnabled = false;
1035 data.m_Parameters.m_PeepholeEnabled = true;
1036 data.m_Parameters.m_ProjectionEnabled = true;
1037
1038
1039 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
1040 inputHandle->Allocate();
1041 outputStateInHandle->Allocate();
1042 cellStateInHandle->Allocate();
1043
1044 scratchHandle->Allocate();
1045 outputStateOutHandle->Allocate();
1046 cellStateOutHandle->Allocate();
1047 outputHandle->Allocate();
1048
1049 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
1050 CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
1051 CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
1052
telsoa01c577f2c2018-08-31 09:22:23 +01001053 workload->Execute();
1054
1055 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
1056
1057 return ret;
1058
1059}
1060
/// Runs a single LSTM workload step configured with CIFG (coupled input-forget
/// gate) and peephole connections enabled and the projection layer disabled,
/// then returns the produced output alongside the caller-supplied reference.
///
/// Because CIFG is enabled, no input-gate weights/bias (input-to-input,
/// recurrent-to-input, cell-to-input, input gate bias) are set on the
/// descriptor — the input gate is derived from the forget gate.
///
/// @param workloadFactory     Backend workload factory used to create the LSTM workload.
/// @param memoryManager       Unused here (kept for signature parity with sibling impls).
/// @param tensorHandleFactory Factory for the input/output tensor handles.
/// @param input               Input activations, shape {batchSize, inputSize}.
/// @param outputExpected      Reference output, shape {batchSize, outputSize}.
/// @param qScale, qOffset     Quantization parameters applied to every TensorInfo.
/// @param constantDataType    Data type of the constant weight/bias tensors.
/// @return LayerTestResult holding the actual final output and the expected output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        const boost::multi_array<T, 2>& input,
        const boost::multi_array<T, 2>& outputExpected,
        float qScale = 0.0f,
        int32_t qOffset = 0,
        armnn::DataType constantDataType = armnn::DataType::Float32)
{
    IgnoreUnused(memoryManager);
    // Test configuration: CIFG and peephole on, projection off.
    bool cifgEnabled = true;
    bool peepholeEnabled = true;
    bool projectionEnabled = false;
    // These are not the input and the output of Lstm yet
    unsigned int batchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
    unsigned int inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);

    unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);

    // With no projection layer the cell width equals the output width.
    const unsigned int cellSize = outputSize;

    // Decide the shape of all input tensors
    armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, ArmnnType, qScale, qOffset); // change to ArmnnType
    armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo cellStateInTensorInfo({batchSize, cellSize}, ArmnnType, qScale, qOffset);

    // With CIFG only three gates write to the scratch buffer; otherwise four.
    unsigned int scratchBufferSize = cifgEnabled ? cellSize * 3 : cellSize * 4;
    armnn::TensorInfo scratchBufferTensorInfo({batchSize, scratchBufferSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo cellStateOutTensorInfo({batchSize, cellSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);

    // List of inputs
    // NOTE(review): the input/state staging tensors are built as float even though
    // the tensor infos use ArmnnType/T — confirm this is intentional for
    // non-Float32 instantiations of this template.
    std::vector<float> inputData;
    inputData.assign(input.data(), input.data() + batchSize*inputSize);
    auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputData);

    // Initial output state and cell state are all zeros.
    std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
    auto outputStateInTensor = MakeTensor<float, 2>(outputStateInTensorInfo, outputStateInVector);

    std::vector<float> cellStateInVector(batchSize * cellSize, 0.f);
    auto cellStateInTensor = MakeTensor<float, 2>(cellStateInTensorInfo, cellStateInVector);


    // Prepare all the weights in the descriptor for LSTM
    armnn::LstmQueueDescriptor data;
    armnn::TensorInfo tensorInfoInput({cellSize, inputSize}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfoNumUnits({cellSize}, constantDataType, qScale, qOffset);

    // Hard-coded reference weights/biases. No input-gate tensors are created
    // because CIFG is enabled.
    auto inputToCellWeights = MakeTensor<float, 2>(tensorInfoInput,
                                                   {-0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f,
                                                    0.04717243f, 0.48944736f, -0.38535351f,
                                                    -0.17212132f});
    auto inputToForgetWeights = MakeTensor<float, 2>(tensorInfoInput,
                                                     {-0.55291498f, -0.42866567f, 0.13056988f,
                                                      -0.3633365f, -0.22755712f, 0.28253698f, 0.24407166f,
                                                      0.33826375f});
    auto inputToOutputWeights = MakeTensor<float, 2>(tensorInfoInput,
                                                     {0.10725588f, -0.02335852f, -0.55932593f,
                                                      -0.09426838f, -0.44257352f, 0.54939759f,
                                                      0.01533556f, 0.42751634f});
    auto cellBias = MakeTensor<float, 1>(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f});
    auto forgetGateBias = MakeTensor<float, 1>(tensorInfoNumUnits, {1.f, 1.f, 1.f, 1.f});
    auto outputGateBias = MakeTensor<float, 1>(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f});

    auto recurrentToCellWeights = MakeTensor<float, 2>(tensorInfoOutput,
            {0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f, 0.42957711f,
             0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f, 0.20675004f,
             0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f, 0.44901288f,
             0.21193194f});
    auto recurrentToForgetWeights = MakeTensor<float, 2>(tensorInfoOutput,
            {-0.13832897f, -0.0515101f, -0.2359007f, -0.16661474f, -0.14340827f,
             0.36986142f, 0.23414481f, 0.55899f, 0.10798943f, -0.41174671f, 0.17751795f,
             -0.34484994f, -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f});

    auto recurrentToOutputWeights = MakeTensor<float, 2>(tensorInfoOutput,
            {0.41613156f, 0.42610586f, -0.16495961f, -0.5663873f, 0.30579174f, -0.05115908f,
             -0.33941799f, 0.23364776f, 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
             0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f});

    // Peephole weights: per-unit connections from the cell state to the gates.
    auto cellToForgetWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
                                                    {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f});
    auto cellToOutputWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
                                                    {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f});

    armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoInput);
    armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoInput);
    armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoInput);

    armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumUnits);
    armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumUnits);
    armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumUnits);

    armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoOutput);
    armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput);
    armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput);


    armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits);
    armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits);

    // Copy the constant weights/biases into their backing tensor handles.
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);

    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);

    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);

    AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
    AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);


    // Wire the constant tensors into the queue descriptor. The input-gate
    // members are deliberately left unset (CIFG).
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;

    data.m_CellBias = &cellBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;

    data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
    data.m_CellToOutputWeights = &cellToOutputWeightsTensor;

    // other parameters for the descriptor
    data.m_Parameters.m_CifgEnabled = cifgEnabled;
    data.m_Parameters.m_ProjectionEnabled = projectionEnabled;
    data.m_Parameters.m_PeepholeEnabled = peepholeEnabled;

    // ActivationFunc 4 — presumably tanh (TfLite-style fused-activation
    // encoding); confirm against the LstmDescriptor documentation.
    // Clipping is disabled (threshold 0).
    data.m_Parameters.m_ActivationFunc = 4;
    data.m_Parameters.m_ClippingThresProj = 0.0;
    data.m_Parameters.m_ClippingThresCell = 0.0;


    // List of outputs. All four LSTM outputs get a LayerTestResult, but only
    // ret3 (the final output) carries an expected reference.
    std::vector<T> scratchBufferVector(batchSize * scratchBufferSize, T());
    auto scratchBufferTensor = MakeTensor<T,2>(scratchBufferTensorInfo, scratchBufferVector);
    LayerTestResult<T, 2> ret0(scratchBufferTensorInfo);

    // Output state for a certain time step
    std::vector<T> outputStateOutVector(batchSize * outputSize, T());
    auto outputStateOutTensor = MakeTensor<T,2>(outputStateOutTensorInfo, outputStateOutVector);
    LayerTestResult<T, 2> ret1(outputStateOutTensorInfo);

    // Cell state for a certain time step
    std::vector<T> cellStateOutVector(batchSize * cellSize, T());
    auto cellStateOutTensor = MakeTensor<T,2>(cellStateOutTensorInfo, cellStateOutVector);
    LayerTestResult<T, 2> ret2(cellStateOutTensorInfo);

    // Output for a certain time step
    std::vector<T> outputVector(batchSize * outputSize, T());
    auto outputTensor = MakeTensor<T, 2>(outputTensorInfo, outputVector);
    std::vector<T> outputData;
    outputData.assign(outputExpected.data(), outputExpected.data() + batchSize*outputSize);
    LayerTestResult<T, 2> ret3(outputTensorInfo);
    ret3.outputExpected = MakeTensor<T, 2>(outputTensorInfo, outputData);

    // Prepare the inputs and outputs for the workload
    std::unique_ptr<armnn::ITensorHandle> inputHandle =
            tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
            tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
            tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> scratchBufferHandle =
            tensorHandleFactory.CreateTensorHandle(scratchBufferTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
            tensorHandleFactory.CreateTensorHandle(outputStateOutTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
            tensorHandleFactory.CreateTensorHandle(cellStateOutTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle =
            tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // The AddInput/AddOutput call order fixes the workload's tensor slot order:
    // inputs  = {input, outputStateIn, cellStateIn}
    // outputs = {scratchBuffer, outputStateOut, cellStateOut, output}
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());

    AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchBufferHandle.get());
    AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
    AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);


    // Handles must be allocated before any data is copied into them.
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();

    scratchBufferHandle->Allocate();
    outputStateOutHandle->Allocate();
    cellStateOutHandle->Allocate();
    outputHandle->Allocate();


    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);

    // Pre-fill the output buffers with their default-initialized (zero) staging
    // tensors before execution.
    CopyDataToITensorHandle(scratchBufferHandle.get(), &scratchBufferTensor[0][0]);
    CopyDataToITensorHandle(outputStateOutHandle.get(), &outputStateOutTensor[0][0]);
    CopyDataToITensorHandle(cellStateOutHandle.get(), &cellStateOutTensor[0][0]);

    workload->Execute();

    // Read back all four outputs; only ret3 is returned for comparison.
    CopyDataFromITensorHandle(&ret0.output[0][0], scratchBufferHandle.get());
    CopyDataFromITensorHandle(&ret1.output[0][0], outputStateOutHandle.get());
    CopyDataFromITensorHandle(&ret2.output[0][0], cellStateOutHandle.get());
    CopyDataFromITensorHandle(&ret3.output[0][0], outputHandle.get());

    return ret3;
}
Jan Eilers38e05bd2019-06-26 13:10:09 +01001286
/// Runs a single LSTM workload step with CIFG disabled and peephole,
/// projection and layer normalization all enabled, against a fixed
/// 2x5 input / 2x3 output configuration with hard-coded weights.
///
/// With CIFG disabled the full set of input-gate tensors (input-to-input,
/// recurrent-to-input, cell-to-input, input gate bias) is provided, and with
/// layer normalization enabled a per-gate layer-norm weight vector is set.
///
/// @param workloadFactory     Backend workload factory used to create the LSTM workload.
/// @param memoryManager       Unused here (kept for signature parity with sibling impls).
/// @param tensorHandleFactory Factory for the input/output tensor handles.
/// @param input               Input activations; must match the fixed {2, 5} shape.
/// @param outputExpected      Reference output; must match the fixed {2, 3} shape.
/// @param qScale, qOffset     Quantization parameters applied to every TensorInfo.
/// @param constantDataType    Data type of the constant weight/bias tensors.
/// @return LayerTestResult holding the actual and expected final output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2>
LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                               const armnn::ITensorHandleFactory& tensorHandleFactory,
                                                               const boost::multi_array<T, 2>& input,
                                                               const boost::multi_array<T, 2>& outputExpected,
                                                               float qScale = 0.0f,
                                                               int32_t qOffset = 0,
                                                               armnn::DataType constantDataType = armnn::DataType::Float32)
{
    IgnoreUnused(memoryManager);
    // Fixed test dimensions — unlike the sibling impls these are not derived
    // from the input/outputExpected shapes, so callers must match them.
    unsigned int batchSize = 2;
    unsigned int outputSize = 3;
    unsigned int inputSize = 5;
    unsigned numUnits = 4;

    armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, ArmnnType, qScale, qOffset);

    // Scratch buffer size without CIFG [batchSize, numUnits * 4]
    armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> ret(outputTensorInfo);

    // NOTE(review): the staging tensors are built as float even though the
    // tensor infos use ArmnnType/T — confirm this is intentional for
    // non-Float32 instantiations of this template.
    std::vector<float> inputVector;
    inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
    auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputVector);

    // Initial states and output staging buffers are all zeros.
    std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
    auto cellStateInTensor = MakeTensor<float,2>(cellStateInTensorInfo, cellStateInVector);

    std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
    auto outputStateInTensor = MakeTensor<float,2>(outputStateInTensorInfo, outputStateInVector);

    // NOTE(review): these three staging tensors are never copied into their
    // handles below (unlike the CIFG impl) — possibly dead locals.
    std::vector<float> scratchBufferVector(batchSize * numUnits * 4, 0.f);
    auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);

    std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
    auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);

    std::vector<float> cellStateOutVector(batchSize * numUnits, 0.f);
    auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);

    std::vector<float> outputVector;
    outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
    ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
            tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
            tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> scratchHandle =
            tensorHandleFactory.CreateTensorHandle(scratchBufferTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
            tensorHandleFactory.CreateTensorHandle(outputStateOutTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
            tensorHandleFactory.CreateTensorHandle(cellStateOutTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // The AddInput/AddOutput call order fixes the workload's tensor slot order:
    // inputs  = {input, outputStateIn, cellStateIn}
    // outputs = {scratchBuffer, outputStateOut, cellStateOut, output}
    armnn::LstmQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());

    AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
    AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
    AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Constant-tensor shapes, named by their dimensions.
    armnn::TensorInfo tensorInfo3({outputSize}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4({numUnits}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4x5({numUnits, inputSize}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4x3({numUnits, outputSize}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo3x4({outputSize, numUnits}, constantDataType, qScale, qOffset);

    // Hard-coded reference weights/biases. Input-gate tensors are present
    // because CIFG is disabled.
    auto inputToInputWeights =
            MakeTensor<float, 2>(tensorInfo4x5, { 0.5f,  0.6f,  0.7f, -0.8f, -0.9f,
                                                  0.1f,  0.2f,  0.3f, -0.4f,  0.5f,
                                                 -0.8f,  0.7f, -0.6f,  0.5f, -0.4f,
                                                 -0.5f, -0.4f, -0.3f, -0.2f, -0.1f}); //{numUnits, inputSize}

    auto inputToForgetWeights =
            MakeTensor<float, 2>(tensorInfo4x5, {-0.6f, -0.1f,  0.3f,  0.2f,  0.9f,
                                                 -0.5f, -0.2f, -0.4f,  0.3f, -0.8f,
                                                 -0.4f,  0.3f, -0.5f, -0.4f, -0.6f,
                                                  0.3f, -0.4f, -0.6f, -0.5f, -0.5f}); //{numUnits, inputSize}

    auto inputToCellWeights =
            MakeTensor<float, 2>(tensorInfo4x5, {-0.4f, -0.3f, -0.2f, -0.1f, -0.5f,
                                                  0.5f, -0.2f, -0.3f, -0.2f, -0.6f,
                                                  0.6f, -0.1f, -0.4f, -0.3f, -0.7f,
                                                  0.7f, -0.9f, -0.5f,  0.8f,  0.6f}); //{numUnits, inputSize}

    auto inputToOutputWeights =
            MakeTensor<float, 2>(tensorInfo4x5, {-0.8f, -0.4f, -0.2f, -0.9f, -0.1f,
                                                 -0.7f,  0.3f, -0.3f, -0.8f, -0.2f,
                                                  0.6f, -0.2f,  0.4f, -0.7f, -0.3f,
                                                 -0.5f,  0.1f,  0.5f, -0.6f, -0.4f}); //{numUnits, inputSize}

    auto inputGateBias =
            MakeTensor<float, 1>(tensorInfo4, {0.03f, 0.15f, 0.22f, 0.38f}); //{numUnits}

    auto forgetGateBias =
            MakeTensor<float, 1>(tensorInfo4, {0.1f, -0.3f, -0.2f, 0.1f}); //{numUnits}

    auto cellBias =
            MakeTensor<float, 1>(tensorInfo4, {-0.05f, 0.72f, 0.25f, 0.08f}); //{numUnits}

    auto outputGateBias =
            MakeTensor<float, 1>(tensorInfo4, {0.05f, -0.01f, 0.2f, 0.1f}); //{numUnits}

    auto recurrentToInputWeights =
            MakeTensor<float, 2>(tensorInfo4x3, {-0.2f,  -0.3f,   0.4f,
                                                  0.1f,  -0.5f,   0.9f,
                                                 -0.2f,  -0.3f,  -0.7f,
                                                  0.05f, -0.2f,  -0.6f}); //{numUnits, outputSize}

    auto recurrentToCellWeights =
            MakeTensor<float, 2>(tensorInfo4x3, {-0.3f,  0.2f,  0.1f,
                                                 -0.3f,  0.8f, -0.08f,
                                                 -0.2f,  0.3f,  0.8f,
                                                 -0.6f, -0.1f,  0.2f}); //{numUnits, outputSize}

    auto recurrentToForgetWeights =
            MakeTensor<float, 2>(tensorInfo4x3, {-0.5f, -0.3f, -0.5f,
                                                 -0.2f,  0.6f,  0.4f,
                                                  0.9f,  0.3f, -0.1f,
                                                  0.2f,  0.5f,  0.2f}); //{numUnits, outputSize}

    auto recurrentToOutputWeights =
            MakeTensor<float, 2>(tensorInfo4x3, { 0.3f, -0.1f,  0.1f,
                                                 -0.2f, -0.5f, -0.7f,
                                                 -0.2f, -0.6f, -0.1f,
                                                 -0.4f, -0.7f, -0.2f}); //{numUnits, outputSize}

    // Peephole weights: per-unit connections from the cell state to the gates.
    auto cellToInputWeights =
            MakeTensor<float, 1>(tensorInfo4, {0.05f, 0.1f, 0.25f, 0.15f}); //{numUnits}

    auto cellToForgetWeights =
            MakeTensor<float, 1>(tensorInfo4, {-0.02f, -0.15f, -0.25f, -0.03f}); //{numUnits}

    auto cellToOutputWeights =
            MakeTensor<float, 1>(tensorInfo4, {0.1f, -0.1f, -0.5f, 0.05f}); //{numUnits}

    // Projection maps the numUnits-wide cell output down to outputSize.
    auto projectionWeights =
            MakeTensor<float, 2>(tensorInfo3x4,
                                 {-0.1f, 0.2f, 0.01f, -0.2f,
                                   0.1f, 0.5f,  0.3f, 0.08f,
                                   0.07f, 0.2f, -0.4f,  0.2f}); //{outputSize, numUnits}

    std::vector<float> projectionBiasVector(outputSize, 0.f);
    auto projectionBias = MakeTensor<float,1>(tensorInfo3, projectionBiasVector); //{outputSize}

    // Per-gate layer normalization weights.
    auto inputLayerNormWeights =
            MakeTensor<float, 1>(tensorInfo4, {0.1f, 0.2f, 0.3f, 0.5f}); //{numUnits}

    auto forgetLayerNormWeights =
            MakeTensor<float, 1>(tensorInfo4, {0.2f, 0.2f, 0.4f, 0.3f}); //{numUnits}

    auto cellLayerNormWeights =
            MakeTensor<float, 1>(tensorInfo4, {0.7f, 0.2f, 0.3f, 0.8f}); //{numUnits}

    auto outputLayerNormWeights =
            MakeTensor<float, 1>(tensorInfo4, {0.6f, 0.2f, 0.2f, 0.5f}); //{numUnits}


    armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
    armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
    armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
    armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
    armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
    armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
    armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
    armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
    armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo3x4);
    armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo3);

    armnn::ScopedTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo4);

    // Copy the constant weights/biases into their backing tensor handles.
    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
    AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
    AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]);

    AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, &inputLayerNormWeights[0]);
    AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
    AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
    AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);

    // Wire all constant tensors into the queue descriptor.
    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_CellToInputWeights = &cellToInputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;
    data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
    data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
    data.m_ProjectionWeights = &projectionWeightsTensor;
    data.m_ProjectionBias = &projectionBiasTensor;

    data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
    data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
    data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
    data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;

    // Flags to set test configuration
    // ActivationFunc 4 — presumably tanh (TfLite-style fused-activation
    // encoding); confirm against the LstmDescriptor documentation.
    data.m_Parameters.m_ActivationFunc = 4;
    data.m_Parameters.m_CifgEnabled = false;
    data.m_Parameters.m_PeepholeEnabled = true;
    data.m_Parameters.m_ProjectionEnabled = true;
    data.m_Parameters.m_LayerNormEnabled = true;


    // Handles must be allocated before any data is copied into them.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();

    scratchHandle->Allocate();
    outputStateOutHandle->Allocate();
    cellStateOutHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);

    workload->Execute();

    // Only the final output is read back and compared.
    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    return ret;
}
1559
//
// Runs a single-step QuantizedLstm (Android NNAPI QUANTIZED_16BIT_LSTM style) workload
// on the given backend and returns the produced output state alongside the expected one.
//
// Tensor layout: input is [numBatches, inputSize] QAsymmU8; cell state is
// [numBatches, outputSize] QSymmS16; output state is [numBatches, outputSize] QAsymmU8.
// Dimensions are derived from the shapes of 'input' and 'outputExpected'.
//
// NOTE(review): the fixed scales/offsets and the weight/bias/state constants below look
// like they mirror a reference QuantizedLSTM unit test (presumably the Android NN one) —
// confirm against the original source before changing any value.
LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const boost::multi_array<uint8_t, 2>& input,
    const boost::multi_array<uint8_t, 2>& outputExpected)
{
    IgnoreUnused(memoryManager);
    // Derive problem dimensions from the caller-supplied tensors.
    auto numBatches = armnn::numeric_cast<unsigned int>(input.shape()[0]);
    auto inputSize = armnn::numeric_cast<unsigned int>(input.shape()[1]);
    auto outputSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[1]);

    // Scale/Offset for input/output, cellState In/Out, weights, bias
    float inputOutputScale = 0.0078125f;
    int32_t inputOutputOffset = 128;

    float cellStateScale = 0.00048828125f;
    int32_t cellStateOffset = 0;

    float weightsScale = 0.00408021f;
    int32_t weightsOffset = 100;

    float biasScale = 3.1876640625e-05f;
    int32_t biasOffset = 0;

    // Input/Output tensor info
    armnn::TensorInfo inputInfo({numBatches , inputSize},
                                armnn::DataType::QAsymmU8,
                                inputOutputScale,
                                inputOutputOffset);

    armnn::TensorInfo cellStateInfo({numBatches , outputSize},
                                    armnn::DataType::QSymmS16,
                                    cellStateScale,
                                    cellStateOffset);

    armnn::TensorInfo outputStateInfo({numBatches , outputSize},
                                      armnn::DataType::QAsymmU8,
                                      inputOutputScale,
                                      inputOutputOffset);

    LayerTestResult<uint8_t, 2> ret(outputStateInfo);

    // Input0: the caller-provided network input.
    std::vector<uint8_t> inputVector;
    inputVector.assign(input.data(), input.data() + (numBatches * inputSize));
    auto inputTensor = MakeTensor<uint8_t, 2>(inputInfo, inputVector);

    // Input1: previous cell state (hard-coded reference values).
    std::vector<int16_t> cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036}; // 13
    auto cellStateInTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateInVector);

    // Input2: previous output state (hard-coded reference values).
    std::vector<uint8_t> outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112}; // 14
    auto outputStateInTensor = MakeTensor<uint8_t, 2>(outputStateInfo, outputStateInVector);

    // Output0: expected cell state after the step.
    // NOTE(review): this tensor is constructed but never compared against the workload's
    // cellStateOut — only the output state (ret.output) is validated below.
    std::vector<int16_t> cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235}; // 0
    auto cellStateOutTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateOutVector);

    // Output1: expected output state, taken from the caller.
    std::vector<uint8_t> outputVector; // 1
    outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
    ret.outputExpected = MakeTensor<uint8_t, 2>(outputStateInfo, outputVector);

    // Create tensor handles
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
        tensorHandleFactory.CreateTensorHandle(cellStateInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
        tensorHandleFactory.CreateTensorHandle(outputStateInfo);

    std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
        tensorHandleFactory.CreateTensorHandle(cellStateInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputStateInfo);

    armnn::QuantizedLstmQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add inputs and outputs to workload.
    // The order of these calls defines the workload's input/output slot indices,
    // so it must match what the QuantizedLstm workload implementation expects.
    AddInputToWorkload(data, info, inputInfo, inputHandle.get());
    AddInputToWorkload(data, info, cellStateInfo, cellStateInHandle.get());
    AddInputToWorkload(data, info, outputStateInfo, outputStateInHandle.get());

    AddOutputToWorkload(data, info, cellStateInfo, cellStateOutHandle.get());
    AddOutputToWorkload(data, info, outputStateInfo, outputHandle.get());

    // Weights and bias tensor and quantization info
    armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                       armnn::DataType::QAsymmU8,
                                       weightsScale,
                                       weightsOffset);

    armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                           armnn::DataType::QAsymmU8,
                                           weightsScale,
                                           weightsOffset);

    armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);

    // Weights and bias tensor data
    auto inputToInputWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {146, 250, 235, 171, 10, 218, 171, 108});
    auto inputToForgetWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {24, 50, 132, 179, 158, 110, 3, 169});
    auto inputToCellWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {133, 34, 29, 49, 206, 109, 54, 183});
    auto inputToOutputWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {195, 187, 11, 99, 109, 10, 218, 48});

    auto recurrentToInputWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
            {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26});
    auto recurrentToForgetWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
            {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253});
    auto recurrentToCellWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
            {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216});
    auto recurrentToOutputWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
            {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98});

    auto inputGateBias = MakeTensor<int32_t, 1>(biasInfo, {-7876, 13488, -726, 32839});
    auto forgetGateBias = MakeTensor<int32_t, 1>(biasInfo, {9206, -46884, -11693, -38724});
    auto cellBias = MakeTensor<int32_t, 1>(biasInfo, {39481, 48624, 48976, -21419});
    auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {-58999, -17050, -41852, -40538});

    // ScopedTensorHandles: backing storage for the constant (weight/bias) tensors.
    armnn::ScopedTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
    armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
    armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
    armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);

    armnn::ScopedTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
    armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
    armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
    armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);

    armnn::ScopedTensorHandle inputGateBiasTensor(biasInfo);
    armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
    armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
    armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);

    // Allocate and copy data
    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);

    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);

    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);

    // Setup queue descriptor: wire the constant tensors into the descriptor.
    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;

    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;

    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

    // Create workload and allocate tensor handles
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantizedLstm(data, info);
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();

    cellStateOutHandle->Allocate();
    outputHandle->Allocate();

    // Copy the run-time inputs into the backend's tensor handles.
    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);

    workload->Execute();

    // Read back only the output state; the caller compares it against ret.outputExpected.
    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    return ret;
}
1747
// QLSTM: CIFG, LayerNorm
//
// Runs a single-step QLstm workload with CIFG (no input gate) and layer normalisation
// enabled (no peephole, no projection) and returns the produced output state alongside
// the expected one supplied by the caller.
//
// Fixed dimensions: 2 batches, inputSize 5, outputSize == numUnits == 4.
// Data types: input/output state QAsymmS8, cell state QSymmS16, weights QSymmS8,
// biases Signed32, layer-norm weights QSymmS16.
//
// NOTE(review): the quantization parameters and constant weight/bias values appear to
// mirror a reference QLSTM unit test — confirm against the original source before
// changing any value.
LayerTestResult<int8_t, 2> QLstmTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const boost::multi_array<int8_t, 2>& input,
    const boost::multi_array<int8_t, 2>& outputExpected)
{
    IgnoreUnused(memoryManager);
    unsigned int numBatches = 2;
    unsigned int inputSize = 5;
    unsigned int outputSize = 4;
    unsigned int numUnits = 4;

    // Descriptor feature flags: CIFG + LayerNorm variant.
    bool cifgEnabled = true;
    bool peepholeEnabled = false;
    bool projectionEnabled = false;
    bool layerNormEnabled = true;

    // Scale/Offset quantization info
    float inputScale = 0.0078125f;
    int32_t inputOffset = 0;

    int32_t hiddenStateZeroPoint = 0;
    float hiddenStateScale = 0.007f;

    // if (!projectionEnabled) outputScale == hiddenStateScale
    float outputScale = hiddenStateScale;
    int32_t outputOffset = hiddenStateZeroPoint;

    float cellStateScale = 3.05176e-05f;
    int32_t cellStateOffset = 0;

    float weightsScale = 0.00784314f;
    int32_t weightsOffset = 0;

    float layerNormScale = 3.05182e-05f;
    int32_t layerNormOffset = 0;

    // Bias scale is tied to the layer-norm scale (layerNormScale / 1024).
    float biasScale = layerNormScale / 1024;
    int32_t biasOffset = 0;

    float inputIntermediateScale = 0.007059f;
    float forgetIntermediateScale = 0.007812f;
    float cellIntermediateScale = inputIntermediateScale;
    float outputIntermediateScale = forgetIntermediateScale;

    // Clipping disabled (0.0f means no clip).
    float cellClip = 0.0f;
    float projectionClip = 0.0f;

    // Input/Output tensor info
    armnn::TensorInfo inputInfo({numBatches , inputSize},
                                armnn::DataType::QAsymmS8,
                                inputScale,
                                inputOffset);

    armnn::TensorInfo cellStateInfo({numBatches , numUnits},
                                    armnn::DataType::QSymmS16,
                                    cellStateScale,
                                    cellStateOffset);

    armnn::TensorInfo outputStateInfo({numBatches , outputSize},
                                      armnn::DataType::QAsymmS8,
                                      outputScale,
                                      outputOffset);

    LayerTestResult<int8_t, 2> ret(outputStateInfo);

    // Input tensors: caller input plus zero-initialised previous states.
    std::vector<int8_t> inputVector;
    inputVector.assign(input.data(), input.data() + (numBatches * inputSize));
    auto inputTensor = MakeTensor<int8_t, 2>(inputInfo, inputVector);

    std::vector<int16_t> cellStateInVector = {0, 0, 0, 0, 0, 0, 0, 0};
    auto cellStateInTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateInVector);

    std::vector<int8_t> outputStateInVector = {0, 0, 0, 0, 0, 0, 0, 0};
    auto outputStateInTensor = MakeTensor<int8_t, 2>(outputStateInfo, outputStateInVector);

    // Output tensors.
    // NOTE(review): cellStateOutTensor holds reference values but is never compared
    // against the workload's cell state output — only ret.output is validated.
    std::vector<int16_t> cellStateOutVector = {-11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149};
    auto cellStateOutTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateOutVector);

    std::vector<int8_t> outputVector;
    outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
    ret.outputExpected = MakeTensor<int8_t, 2>(outputStateInfo, outputVector);

    // Create tensor handles
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
        tensorHandleFactory.CreateTensorHandle(cellStateInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
        tensorHandleFactory.CreateTensorHandle(outputStateInfo);

    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
        tensorHandleFactory.CreateTensorHandle(outputStateInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
        tensorHandleFactory.CreateTensorHandle(cellStateInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputStateInfo);

    armnn::QLstmQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add inputs and outputs to workload.
    // The order of these calls defines the workload's input/output slot indices,
    // so it must match what the QLstm workload implementation expects.
    AddInputToWorkload(data, info, inputInfo, inputHandle.get());
    AddInputToWorkload(data, info, outputStateInfo, outputStateInHandle.get());
    AddInputToWorkload(data, info, cellStateInfo, cellStateInHandle.get());

    AddOutputToWorkload(data, info, outputStateInfo, outputStateOutHandle.get());
    AddOutputToWorkload(data, info, cellStateInfo, cellStateOutHandle.get());
    AddOutputToWorkload(data, info, outputStateInfo, outputHandle.get());

    // Weights and bias tensor and quantization info
    armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                       armnn::DataType::QSymmS8,
                                       weightsScale,
                                       weightsOffset);

    armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                           armnn::DataType::QSymmS8,
                                           weightsScale,
                                           weightsOffset);

    armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);

    armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset);

    // Weights and bias tensor data.
    // CIFG is enabled, so no input-gate weights/bias are supplied.
    auto inputToForgetWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
            {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64});
    auto inputToCellWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
            {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77});
    auto inputToOutputWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
            {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51});

    auto recurrentToForgetWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
            {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25, 25, 38, -13, 51});
    auto recurrentToCellWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
            {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25, 38, -13, 25, 64});
    auto recurrentToOutputWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
            {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25, 13, 64, 25, -38});

    auto forgetGateBias = MakeTensor<int32_t, 1>(biasInfo, {2147484, -6442451, -4294968, 2147484});
    auto cellBias = MakeTensor<int32_t, 1>(biasInfo, {-1073742, 15461883, 5368709, 1717987});
    auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {1073742, -214748, 4294968, 2147484});

    auto forgetLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {6553, 6553, 13107, 9830});
    auto cellLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {22937, 6553, 9830, 26214});
    auto outputLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {19660, 6553, 6553, 16384});

    // ScopedTensorHandles: backing storage for the constant (weight/bias) tensors.
    armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
    armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
    armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);

    armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
    armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
    armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);

    armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
    armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
    armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);

    armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
    armnn::ScopedTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
    armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);

    // Allocate and copy data
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);

    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);

    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);

    AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
    AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
    AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);

    // Setup queue descriptor: wire the constant tensors into the descriptor.
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;

    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;

    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

    data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
    data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
    data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;

    // Descriptor parameters: feature flags and quantization metadata.
    data.m_Parameters.m_CifgEnabled = cifgEnabled;
    data.m_Parameters.m_PeepholeEnabled = peepholeEnabled;
    data.m_Parameters.m_ProjectionEnabled = projectionEnabled;
    data.m_Parameters.m_LayerNormEnabled = layerNormEnabled;

    data.m_Parameters.m_InputIntermediateScale = inputIntermediateScale;
    data.m_Parameters.m_ForgetIntermediateScale = forgetIntermediateScale;
    data.m_Parameters.m_CellIntermediateScale = cellIntermediateScale;
    data.m_Parameters.m_OutputIntermediateScale = outputIntermediateScale;

    data.m_Parameters.m_HiddenStateZeroPoint = hiddenStateZeroPoint;
    data.m_Parameters.m_HiddenStateScale = hiddenStateScale;

    data.m_Parameters.m_CellClip = cellClip;
    data.m_Parameters.m_ProjectionClip = projectionClip;

    // Create workload and allocate tensor handles
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQLstm(data, info);
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();

    outputStateOutHandle->Allocate();
    cellStateOutHandle->Allocate();
    outputHandle->Allocate();

    // Copy the run-time inputs into the backend's tensor handles.
    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);

    workload->Execute();

    // Read back only the output; the caller compares it against ret.outputExpected.
    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    return ret;
}
1985
James Conroyb22a75e2020-06-08 14:53:10 +01001986// QLSTM: Projection, LayerNorm
1987LayerTestResult<int8_t, 2> QLstmTestImpl1(
1988 armnn::IWorkloadFactory& workloadFactory,
1989 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williamsc43de6a2020-08-27 11:13:25 +01001990 const armnn::ITensorHandleFactory& tensorHandleFactory,
James Conroyb22a75e2020-06-08 14:53:10 +01001991 const boost::multi_array<int8_t, 2>& input,
1992 const boost::multi_array<int8_t, 2>& outputExpected)
1993{
1994 IgnoreUnused(memoryManager);
1995 unsigned int numBatches = 2;
1996 unsigned int inputSize = 5;
1997 unsigned int outputSize = 3;
1998 unsigned int numUnits = 4;
1999
2000 bool cifgEnabled = false;
2001 bool peepholeEnabled = false;
2002 bool projectionEnabled = true;
2003 bool layerNormEnabled = true;
2004
2005 // Scale/Offset quantization info
2006 float inputScale = 0.0078125f;
2007 int32_t inputOffset = 0;
2008
2009 int32_t hiddenStateZeroPoint = 0;
2010 float hiddenStateScale = 0.007f;
2011
2012 // if (!projectionEnabled) outputScale == hiddenStateScale
2013 float outputScale = 3.05176e-05f;
2014 int32_t outputOffset = 0;
2015
2016 float cellStateScale = 3.05176e-05f;
2017 int32_t cellStateOffset = 0;
2018
2019 float weightsScale = 0.00784314f;
2020 int32_t weightsOffset = 0;
2021
2022 float layerNormScale = 3.05182e-05f;
2023 int32_t layerNormOffset = 0;
2024
2025 float biasScale = layerNormScale / 1024;
2026 int32_t biasOffset = 0;
2027
2028 float projectionWeightsScale = 0.00392157f;
2029
2030 float inputIntermediateScale = 0.007059f;
2031 float forgetIntermediateScale = 0.007812f;
2032 float cellIntermediateScale = inputIntermediateScale;
2033 float outputIntermediateScale = forgetIntermediateScale;
2034
2035 float cellClip = 0.0f;
2036 float projectionClip = 0.0f;
2037
2038 // Input/Output tensor info
2039 armnn::TensorInfo inputInfo({numBatches , inputSize},
2040 armnn::DataType::QAsymmS8,
2041 inputScale,
2042 inputOffset);
2043
2044 armnn::TensorInfo cellStateInfo({numBatches , numUnits},
2045 armnn::DataType::QSymmS16,
2046 cellStateScale,
2047 cellStateOffset);
2048
2049 armnn::TensorInfo outputStateInfo({numBatches , outputSize},
2050 armnn::DataType::QAsymmS8,
2051 outputScale,
2052 outputOffset);
2053
2054 LayerTestResult<int8_t, 2> ret(outputStateInfo);
2055
2056 // Input tensors
2057 std::vector<int8_t> inputVector;
2058 inputVector.assign(input.data(), input.data() + (numBatches * inputSize));
2059 auto inputTensor = MakeTensor<int8_t, 2>(inputInfo, inputVector);
2060
2061 std::vector<int16_t> cellStateInVector = {0, 0, 0, 0, 0, 0, 0, 0};
2062 auto cellStateInTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateInVector);
2063
2064 std::vector<int8_t> outputStateInVector = {0, 0, 0, 0, 0, 0};
2065 auto outputStateInTensor = MakeTensor<int8_t, 2>(outputStateInfo, outputStateInVector);
2066
2067 // Output tensors
2068 std::vector<int16_t> cellStateOutVector = {-14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939};
2069 auto cellStateOutTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateOutVector);
2070
2071 std::vector<int8_t> outputVector;
2072 outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
2073 ret.outputExpected = MakeTensor<int8_t, 2>(outputStateInfo, outputVector);
2074
2075 // Create tensor handles
Finn Williamsc43de6a2020-08-27 11:13:25 +01002076 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
James Conroyb22a75e2020-06-08 14:53:10 +01002077 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
Finn Williamsc43de6a2020-08-27 11:13:25 +01002078 tensorHandleFactory.CreateTensorHandle(cellStateInfo);
James Conroyb22a75e2020-06-08 14:53:10 +01002079 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
Finn Williamsc43de6a2020-08-27 11:13:25 +01002080 tensorHandleFactory.CreateTensorHandle(outputStateInfo);
James Conroyb22a75e2020-06-08 14:53:10 +01002081
Finn Williamsc43de6a2020-08-27 11:13:25 +01002082 std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
2083 tensorHandleFactory.CreateTensorHandle(outputStateInfo);
James Conroyb22a75e2020-06-08 14:53:10 +01002084 std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
Finn Williamsc43de6a2020-08-27 11:13:25 +01002085 tensorHandleFactory.CreateTensorHandle(cellStateInfo);
2086 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputStateInfo);
James Conroyb22a75e2020-06-08 14:53:10 +01002087
2088 armnn::QLstmQueueDescriptor data;
2089 armnn::WorkloadInfo info;
2090
2091 // Add inputs and outputs to workload
2092 AddInputToWorkload(data, info, inputInfo, inputHandle.get());
2093 AddInputToWorkload(data, info, outputStateInfo, outputStateInHandle.get());
2094 AddInputToWorkload(data, info, cellStateInfo, cellStateInHandle.get());
2095
2096 AddOutputToWorkload(data, info, outputStateInfo, outputStateOutHandle.get());
2097 AddOutputToWorkload(data, info, cellStateInfo, cellStateOutHandle.get());
2098 AddOutputToWorkload(data, info, outputStateInfo, outputHandle.get());
2099
2100 // Weights and bias tensor and quantization info
2101 armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
2102 armnn::DataType::QSymmS8,
2103 weightsScale,
2104 weightsOffset);
2105
2106 armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
2107 armnn::DataType::QSymmS8,
2108 weightsScale,
2109 weightsOffset);
2110
2111 armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset);
2112
2113 armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset);
2114
2115 armnn::TensorInfo projectionWeightsInfo({outputSize, numUnits},
2116 armnn::DataType::QSymmS8,
2117 projectionWeightsScale,
2118 0);
2119
2120 // Weights and bias tensor data
2121 auto inputToInputWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
2122 {64, 77, 89, -102, -115, 13, 25, 38, -51, 64, -102, 89, -77, 64, -51, -64, -51, -38, -25, -13});
2123 auto inputToForgetWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
2124 {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64});
2125 auto inputToCellWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
2126 {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77});
2127 auto inputToOutputWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
2128 {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51});
2129
2130 auto recurrentToInputWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
2131 {-25, -38, 51, 13, -64, 115, -25, -38, -89, 6, -25, -77});
2132 auto recurrentToForgetWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
2133 {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25});
2134 auto recurrentToCellWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
2135 {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25});
2136 auto recurrentToOutputWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
2137 {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25});
2138
2139 auto inputGateBias = MakeTensor<int32_t, 1>(biasInfo, {644245, 3221226, 4724464, 8160438});
2140 auto forgetGateBias = MakeTensor<int32_t, 1>(biasInfo, {2147484, -6442451, -4294968, 2147484});
2141 auto cellBias = MakeTensor<int32_t, 1>(biasInfo, {-1073742, 15461883, 5368709, 1717987});
2142 auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {1073742, -214748, 4294968, 2147484});
2143
2144 auto inputLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {3277, 6553, 9830, 16384});
2145 auto forgetLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {6553, 6553, 13107, 9830});
2146 auto cellLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {22937, 6553, 9830, 26214});
2147 auto outputLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {19660, 6553, 6553, 16384});
2148
2149 auto projectionWeights = MakeTensor<int8_t, 2>(projectionWeightsInfo,
2150 {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51});
2151
James Conroy1f58f032021-04-27 17:13:27 +01002152 // ScopedTensorHandles
2153 armnn::ScopedTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
2154 armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
2155 armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
2156 armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
James Conroyb22a75e2020-06-08 14:53:10 +01002157
James Conroy1f58f032021-04-27 17:13:27 +01002158 armnn::ScopedTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
2159 armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
2160 armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
2161 armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
James Conroyb22a75e2020-06-08 14:53:10 +01002162
James Conroy1f58f032021-04-27 17:13:27 +01002163 armnn::ScopedTensorHandle inputGateBiasTensor(biasInfo);
2164 armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
2165 armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
2166 armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);
James Conroyb22a75e2020-06-08 14:53:10 +01002167
James Conroy1f58f032021-04-27 17:13:27 +01002168 armnn::ScopedTensorHandle inputLayerNormWeightsTensor(layerNormWeightsInfo);
2169 armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
2170 armnn::ScopedTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
2171 armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);
James Conroyb22a75e2020-06-08 14:53:10 +01002172
James Conroy1f58f032021-04-27 17:13:27 +01002173 armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo);
James Conroyb22a75e2020-06-08 14:53:10 +01002174
2175 // Allocate and copy data
2176 AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
2177 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
2178 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
2179 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
2180
2181 AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
2182 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
2183 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
2184 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
2185
2186 AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
2187 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
2188 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
2189 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
2190
2191 AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, &inputLayerNormWeights[0]);
2192 AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
2193 AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
2194 AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);
2195
2196 AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
2197
2198 // Setup queue descriptor
2199 data.m_InputToInputWeights = &inputToInputWeightsTensor;
2200 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
2201 data.m_InputToCellWeights = &inputToCellWeightsTensor;
2202 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
2203
2204 data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
2205 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
2206 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
2207 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
2208
2209 data.m_InputGateBias = &inputGateBiasTensor;
2210 data.m_ForgetGateBias = &forgetGateBiasTensor;
2211 data.m_CellBias = &cellBiasTensor;
2212 data.m_OutputGateBias = &outputGateBiasTensor;
2213
2214 data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
2215 data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
2216 data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
2217 data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;
2218
2219 data.m_ProjectionWeights = &projectionWeightsTensor;
2220
2221 data.m_Parameters.m_CifgEnabled = cifgEnabled;
2222 data.m_Parameters.m_PeepholeEnabled = peepholeEnabled;
2223 data.m_Parameters.m_ProjectionEnabled = projectionEnabled;
2224 data.m_Parameters.m_LayerNormEnabled = layerNormEnabled;
2225
2226 data.m_Parameters.m_InputIntermediateScale = inputIntermediateScale;
2227 data.m_Parameters.m_ForgetIntermediateScale = forgetIntermediateScale;
2228 data.m_Parameters.m_CellIntermediateScale = cellIntermediateScale;
2229 data.m_Parameters.m_OutputIntermediateScale = outputIntermediateScale;
2230
2231 data.m_Parameters.m_HiddenStateZeroPoint = hiddenStateZeroPoint;
2232 data.m_Parameters.m_HiddenStateScale = hiddenStateScale;
2233
2234 data.m_Parameters.m_CellClip = cellClip;
2235 data.m_Parameters.m_ProjectionClip = projectionClip;
2236
2237 // Create workload and allocate tensor handles
2238 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQLstm(data, info);
2239 inputHandle->Allocate();
2240 outputStateInHandle->Allocate();
2241 cellStateInHandle->Allocate();
2242
2243 outputStateOutHandle->Allocate();
2244 cellStateOutHandle->Allocate();
2245 outputHandle->Allocate();
2246
2247 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
2248 CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
2249 CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
2250
2251 workload->Execute();
2252
2253 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
2254
2255 return ret;
2256}
2257
// QLSTM: Projection, CIFG, LayerNorm
// Runs a single QLSTM (quantized LSTM) step through a workload obtained from
// 'workloadFactory' and returns the produced vs expected final output.
// Configuration under test: CIFG enabled (no input gate), projection enabled,
// layer normalisation enabled, peephole disabled.
// - input:          [numBatches, inputSize]  QAsymmS8 activations
// - outputExpected: [numBatches, outputSize] QAsymmS8 expected output
LayerTestResult<int8_t, 2> QLstmTestImpl2(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        const boost::multi_array<int8_t, 2>& input,
        const boost::multi_array<int8_t, 2>& outputExpected)
{
    IgnoreUnused(memoryManager);
    // Fixed network dimensions for this test vector set.
    unsigned int numBatches = 2;
    unsigned int inputSize = 5;
    unsigned int outputSize = 3;
    unsigned int numUnits = 4;

    // Descriptor flags exercised by this variant: CIFG + projection + layer norm.
    bool cifgEnabled = true;
    bool peepholeEnabled = false;
    bool projectionEnabled = true;
    bool layerNormEnabled = true;

    // Scale/Offset quantization info
    float inputScale = 0.0078125f;
    int32_t inputOffset = 0;

    int32_t hiddenStateZeroPoint = 0;
    float hiddenStateScale = 0.007f;

    // if (!projectionEnabled) outputScale == hiddenStateScale
    float outputScale = 3.05176e-05f;
    int32_t outputOffset = 0;

    float cellStateScale = 3.05176e-05f;
    int32_t cellStateOffset = 0;

    float weightsScale = 0.00784314f;
    int32_t weightsOffset = 0;

    float layerNormScale = 3.05182e-05f;
    int32_t layerNormOffset = 0;

    // NOTE(review): bias scale is derived from the layer-norm scale here;
    // presumably this matches the reference kernel's bias quantization — verify
    // against the QLstm workload implementation if these vectors are changed.
    float biasScale = layerNormScale / 1024;
    int32_t biasOffset = 0;

    float projectionWeightsScale = 0.00392157f;

    // Per-gate intermediate (pre-activation) quantization scales.
    float inputIntermediateScale = 0.007059f;
    float forgetIntermediateScale = 0.007812f;
    float cellIntermediateScale = inputIntermediateScale;
    float outputIntermediateScale = forgetIntermediateScale;

    // 0.0f means clipping disabled.
    float cellClip = 0.0f;
    float projectionClip = 0.0f;

    // Input/Output tensor info
    armnn::TensorInfo inputInfo({numBatches , inputSize},
                                armnn::DataType::QAsymmS8,
                                inputScale,
                                inputOffset);

    armnn::TensorInfo cellStateInfo({numBatches , numUnits},
                                    armnn::DataType::QSymmS16,
                                    cellStateScale,
                                    cellStateOffset);

    armnn::TensorInfo outputStateInfo({numBatches , outputSize},
                                      armnn::DataType::QAsymmS8,
                                      outputScale,
                                      outputOffset);

    LayerTestResult<int8_t, 2> ret(outputStateInfo);

    // Input tensors: states start at zero.
    std::vector<int8_t> inputVector;
    inputVector.assign(input.data(), input.data() + (numBatches * inputSize));
    auto inputTensor = MakeTensor<int8_t, 2>(inputInfo, inputVector);

    std::vector<int16_t> cellStateInVector = {0, 0, 0, 0, 0, 0, 0, 0};
    auto cellStateInTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateInVector);

    std::vector<int8_t> outputStateInVector = {0, 0, 0, 0, 0, 0};
    auto outputStateInTensor = MakeTensor<int8_t, 2>(outputStateInfo, outputStateInVector);

    // Output tensors
    // NOTE(review): cellStateOutTensor is constructed but never compared against
    // the workload's cell-state output below — only 'output' is validated.
    std::vector<int16_t> cellStateOutVector = {-14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939};
    auto cellStateOutTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateOutVector);

    std::vector<int8_t> outputVector;
    outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
    ret.outputExpected = MakeTensor<int8_t, 2>(outputStateInfo, outputVector);

    // Create tensor handles
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
            tensorHandleFactory.CreateTensorHandle(cellStateInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
            tensorHandleFactory.CreateTensorHandle(outputStateInfo);

    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
            tensorHandleFactory.CreateTensorHandle(outputStateInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
            tensorHandleFactory.CreateTensorHandle(cellStateInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputStateInfo);

    armnn::QLstmQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add inputs and outputs to workload
    // (order: input, outputStateIn, cellStateIn / outputStateOut, cellStateOut, output)
    AddInputToWorkload(data, info, inputInfo, inputHandle.get());
    AddInputToWorkload(data, info, outputStateInfo, outputStateInHandle.get());
    AddInputToWorkload(data, info, cellStateInfo, cellStateInHandle.get());

    AddOutputToWorkload(data, info, outputStateInfo, outputStateOutHandle.get());
    AddOutputToWorkload(data, info, cellStateInfo, cellStateOutHandle.get());
    AddOutputToWorkload(data, info, outputStateInfo, outputHandle.get());

    // Weights and bias tensor and quantization info
    armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
                                       armnn::DataType::QSymmS8,
                                       weightsScale,
                                       weightsOffset);

    armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
                                           armnn::DataType::QSymmS8,
                                           weightsScale,
                                           weightsOffset);

    armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset);

    armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset);

    armnn::TensorInfo projectionWeightsInfo({outputSize, numUnits},
                                            armnn::DataType::QSymmS8,
                                            projectionWeightsScale,
                                            0);

    // Weights and bias tensor data
    // (no input-gate weights/bias: CIFG derives the input gate from the forget gate)
    auto inputToForgetWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
            {-77, -13, 38, 25, 115, -64, -25, -51, 38, -102, -51, 38, -64, -51, -77, 38, -51, -77, -64, -64});
    auto inputToCellWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
            {-51, -38, -25, -13, -64, 64, -25, -38, -25, -77, 77, -13, -51, -38, -89, 89, -115, -64, 102, 77});
    auto inputToOutputWeights = MakeTensor<int8_t, 2>(inputWeightsInfo,
            {-102, -51, -25, -115, -13, -89, 38, -38, -102, -25, 77, -25, 51, -89, -38, -64, 13, 64, -77, -51});

    auto recurrentToForgetWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
            {-64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25});
    auto recurrentToCellWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
            {-38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25});
    auto recurrentToOutputWeights = MakeTensor<int8_t, 2>(recurrentWeightsInfo,
            {38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25});

    auto forgetGateBias = MakeTensor<int32_t, 1>(biasInfo, {2147484, -6442451, -4294968, 2147484});
    auto cellBias = MakeTensor<int32_t, 1>(biasInfo, {-1073742, 15461883, 5368709, 1717987});
    auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {1073742, -214748, 4294968, 2147484});

    auto forgetLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {6553, 6553, 13107, 9830});
    auto cellLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {22937, 6553, 9830, 26214});
    auto outputLayerNormWeights = MakeTensor<int16_t, 1>(layerNormWeightsInfo, {19660, 6553, 6553, 16384});

    auto projectionWeights = MakeTensor<int8_t, 2>(projectionWeightsInfo,
            {-25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51});

    // ScopedTensorHandles own the constant-tensor storage for the descriptor.
    armnn::ScopedTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
    armnn::ScopedTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
    armnn::ScopedTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);

    armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
    armnn::ScopedTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
    armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);

    armnn::ScopedTensorHandle forgetGateBiasTensor(biasInfo);
    armnn::ScopedTensorHandle cellBiasTensor(biasInfo);
    armnn::ScopedTensorHandle outputGateBiasTensor(biasInfo);

    armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(layerNormWeightsInfo);
    armnn::ScopedTensorHandle cellLayerNormWeightsTensor(layerNormWeightsInfo);
    armnn::ScopedTensorHandle outputLayerNormWeightsTensor(layerNormWeightsInfo);

    armnn::ScopedTensorHandle projectionWeightsTensor(projectionWeightsInfo);

    // Allocate and copy data
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);

    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);

    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);

    AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
    AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
    AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);

    AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);

    // Setup queue descriptor
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;

    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;

    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

    data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
    data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
    data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;

    data.m_ProjectionWeights = &projectionWeightsTensor;

    data.m_Parameters.m_CifgEnabled = cifgEnabled;
    data.m_Parameters.m_PeepholeEnabled = peepholeEnabled;
    data.m_Parameters.m_ProjectionEnabled = projectionEnabled;
    data.m_Parameters.m_LayerNormEnabled = layerNormEnabled;

    data.m_Parameters.m_InputIntermediateScale = inputIntermediateScale;
    data.m_Parameters.m_ForgetIntermediateScale = forgetIntermediateScale;
    data.m_Parameters.m_CellIntermediateScale = cellIntermediateScale;
    data.m_Parameters.m_OutputIntermediateScale = outputIntermediateScale;

    data.m_Parameters.m_HiddenStateZeroPoint = hiddenStateZeroPoint;
    data.m_Parameters.m_HiddenStateScale = hiddenStateScale;

    data.m_Parameters.m_CellClip = cellClip;
    data.m_Parameters.m_ProjectionClip = projectionClip;

    // Create workload and allocate tensor handles
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQLstm(data, info);
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();

    outputStateOutHandle->Allocate();
    cellStateOutHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);

    workload->Execute();

    // Only the final (projected) output is read back for comparison.
    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    return ret;
}
2511
James Conroy4f1f8992020-04-29 20:01:10 +01002512
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002513} // anonymous namespace
2514
2515#if defined(ARMNNREF_ENABLED)
2516
2517// The LSTM test units are run only for the reference backend at the moment
2518
2519void LstmUtilsZeroVectorTest()
2520{
2521 armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
2522 boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2523 {2., 3., 3., 4.}));
2524
2525 boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2526 {0., 0., 0., 0.}));
2527
2528 return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
2529}
2530
2531void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
2532{
2533 uint32_t batchSize = 2;
2534 uint32_t vecSize = 4;
2535 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2536 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2537 { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0
2538 0.9f, 1.0f, 1.1f, 1.2f })); //batch 1
2539
2540 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2541 { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0
2542 -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f })); //batch 1
2543
2544 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2545 vecSize, batchSize, expectedOutput);
2546}
2547
2548void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
2549{
2550 uint32_t batchSize = 2;
2551 uint32_t vecSize = 4;
2552 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2553 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2554 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2555 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
2556
2557 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2558 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2559 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
2560
2561 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2562 vecSize, batchSize, expectedOutput);
2563}
2564
2565void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
2566{
2567 uint32_t batchSize = 2;
2568 uint32_t vecSize = 4;
2569 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2570 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2571 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2572 0.1f, 0.2f, 0.3f, 0.4f })); //batch 1
2573
2574 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2575 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2576 -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f })); //batch 1
2577
2578 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2579 vecSize, batchSize, expectedOutput);
2580}
2581
// Checks VectorBatchVectorCwiseProduct: the single 'vector' is broadcast over
// every batch row and multiplied element-wise into 'batchVector'.
void LstmUtilsVectorBatchVectorCwiseProductTest()
{
    uint32_t batchSize = 4;
    uint32_t vecSize = 29;
    armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
            { 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
             11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
             21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}));

    // Batches cover all sign combinations against 'vector': same sign,
    // opposite sign, and the two alternating-sign patterns.
    armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
            { /* batch 0 */
              1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
              11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
              21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f,
              /* batch 1 */
              -1.1f, -2.2f, -3.3f, -4.4f, -5.5f, -6.6f, -7.7f, -8.8f, -9.9f, -10.1f,
              -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
              -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f, 0.0f,
              /* batch 2 */
              1.1f, -2.2f, 3.3f, -4.4f, 5.5f, -6.6f, 7.7f, -8.8f, 9.9f, -10.1f,
              11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
              21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f, 0.0f,
              /* batch 3 */
              -1.1f, 2.2f, -3.3f, 4.4f, -5.5f, 6.6f, -7.7f, 8.8f, -9.9f, 10.1f,
              -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
              -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f}));

    // Expect output[b][i] = vector[i] * batchVector[b][i] (element-wise
    // product; e.g. 1.1 * 1.1 = 1.21). The values below are that product.
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
            { /* batch 0 */
              1.210000f, 4.840000f, 10.889999f, 19.360001f, 30.250000f, 43.559998f,
              59.289997f, 77.440002f, 98.009995f, 102.010010f, 123.432091f, 146.894394f,
              172.396896f, 199.939606f, 229.522491f, 261.145599f, 294.808899f, 330.512421f,
              368.256134f, 408.040039f, 449.864075f, 493.728363f, 539.632874f, 587.577576f,
              637.562500f, 689.587585f, 743.652954f, 799.758423f, 0.000000f,
              /* batch 1 */
              -1.210000f, -4.840000f, -10.889999f, -19.360001f, -30.250000f, -43.559998f,
              -59.289997f, -77.440002f, -98.009995f, -102.010010f, -123.432091f, -146.894394f,
              -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
              -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
              -637.562500f, -689.587585f, -743.652954f, -799.758423f, 0.000000f,
              /* batch 2 */
              1.210000f, -4.840000f, 10.889999f, -19.360001f, 30.250000f, -43.559998f,
              59.289997f, -77.440002f, 98.009995f, -102.010010f, 123.432091f, -146.894394f,
              172.396896f, -199.939606f, 229.522491f, -261.145599f, 294.808899f, -330.512421f,
              368.256134f, -408.040039f, 449.864075f, -493.728363f, 539.632874f, -587.577576f,
              637.562500f, -689.587585f, 743.652954f, -799.758423f, 0.000000f,
              /* batch 3 */
              -1.210000f, 4.840000f, -10.889999f, 19.360001f, -30.250000f, 43.559998f,
              -59.289997f, 77.440002f, -98.009995f, 102.010010f, -123.432091f, 146.894394f,
              -172.396896f, 199.939606f, -229.522491f, 261.145599f, -294.808899f, 330.512421f,
              -368.256134f, 408.040039f, -449.864075f, 493.728363f, -539.632874f, 587.577576f,
              -637.562500f, 689.587585f, -743.652954f, 799.758423f, 0.000000f}));

    return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
            vecSize, batchSize, expectedOutput);
}
2641
2642void LstmUtilsVectorBatchVectorAddTest()
2643{
2644 uint32_t batchSize = 2;
2645 uint32_t vecSize = 3;
2646 armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
2647 boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
2648 { 0.0f, -0.5f, 1.0f}));
2649
2650 armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
2651 boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2652 { 1.0f, 2.0f, 3.0f, //batch 0
2653 4.0f, 5.0f, 6.0f})); //batch 1
2654
2655 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2656 { 1.0f, 1.5f, 4.0f,
2657 4.0f, 4.5f, 7.0f}));
2658
2659 return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
2660 vecSize, batchSize, expectedOutput);
2661}
2662
2663#endif
2664
2665LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
2666 armnn::IWorkloadFactory& workloadFactory,
Finn Williamsc43de6a2020-08-27 11:13:25 +01002667 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2668 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002669{
2670 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
2671 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2672 { 2., 3., 3., 4. }));
2673
2674 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
2675 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2676 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2677 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
2678 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Finn Williamsc43de6a2020-08-27 11:13:25 +01002679 workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002680}
2681
2682LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
2683 armnn::IWorkloadFactory& workloadFactory,
Finn Williamsc43de6a2020-08-27 11:13:25 +01002684 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2685 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002686{
2687 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
2688 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2689 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2690 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
2691
2692 armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
2693 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2694 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
2695 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
2696 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
2697 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
2698 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
2699 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
2700 0.02168f}));
2701 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
Finn Williamsc43de6a2020-08-27 11:13:25 +01002702 workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002703}
2704
2705LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
2706 armnn::IWorkloadFactory& workloadFactory,
Finn Williamsc43de6a2020-08-27 11:13:25 +01002707 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2708 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002709{
2710 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
2711 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2712 {2., 3., 3., 4.}));
2713
2714 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
2715 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2716 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
2717 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
2718
2719 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Finn Williamsc43de6a2020-08-27 11:13:25 +01002720 workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002721}
2722
2723LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
Finn Williamsc43de6a2020-08-27 11:13:25 +01002724 armnn::IWorkloadFactory& workloadFactory,
2725 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2726 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002727{
2728 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
2729 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2730 {0.7f, 0.8f, 0.1f, 0.2f, 0.3f, //batch 0
2731 0.3f, 0.2f, 0.9f, 0.8f, 0.1f})); //batch 1
2732
2733 armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
2734 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2735 { 0.0244077f, 0.128027f, -0.00170918f, //batch 0
2736 -0.00692428f, 0.0848741f, 0.063445f})); //batch 1
2737 return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
Finn Williamsc43de6a2020-08-27 11:13:25 +01002738 workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002739}
2740
2741LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
2742 armnn::IWorkloadFactory& workloadFactory,
Finn Williamsc43de6a2020-08-27 11:13:25 +01002743 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2744 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002745{
2746 const float qScale = 1.0f;
2747 const int32_t qOffset = 0;
2748
Derek Lambertif90c56d2020-01-10 17:14:08 +00002749 const armnn::DataType datatype = armnn::DataType::QSymmS16;
2750 const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002751
2752 armnn::TensorInfo inputDesc({2, 2}, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002753 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(
2754 inputDesc,
2755 armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002756
2757 armnn::TensorInfo outputDesc({2, 4}, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002758 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(
2759 outputDesc,
2760 armnnUtils::QuantizedVector<int16_t>(
2761 {
2762 -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
2763 -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
2764 },
2765 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002766
2767 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
Finn Williamsc43de6a2020-08-27 11:13:25 +01002768 workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002769
2770}
2771
2772LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
2773 armnn::IWorkloadFactory& workloadFactory,
Finn Williamsc43de6a2020-08-27 11:13:25 +01002774 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2775 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002776{
2777 const float qScale = 1.0f;
2778 const int32_t qOffset = 0;
2779
Derek Lambertif90c56d2020-01-10 17:14:08 +00002780 const armnn::DataType datatype = armnn::DataType::QSymmS16;
2781 const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002782
2783 armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002784 boost::multi_array<int16_t, 2> input =
2785 MakeTensor<int16_t, 2>(
2786 inputDesc,
2787 armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002788
2789 armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002790 boost::multi_array<int16_t, 2> expectedOutput =
2791 MakeTensor<int16_t, 2>(
2792 outputDesc,
2793 armnnUtils::QuantizedVector<int16_t>(
2794 {
2795 -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2796 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f
2797 },
2798 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002799
2800 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
Finn Williamsc43de6a2020-08-27 11:13:25 +01002801 workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002802}
2803
2804LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
2805 armnn::IWorkloadFactory& workloadFactory,
Finn Williamsc43de6a2020-08-27 11:13:25 +01002806 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2807 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002808{
2809 const float qScale = 2.0f;
2810 const int32_t qOffset = 0;
2811
Derek Lambertif90c56d2020-01-10 17:14:08 +00002812 const armnn::DataType datatype = armnn::DataType::QSymmS16;
2813 const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002814
2815 armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002816 boost::multi_array<int16_t, 2> input =
2817 MakeTensor<int16_t, 2>(
2818 inputDesc,
2819 armnnUtils::QuantizedVector<int16_t>(
2820 {
2821 0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2822 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f
2823 },
2824 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002825
2826 armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002827 boost::multi_array<int16_t, 2> expectedOutput =
2828 MakeTensor<int16_t, 2>(
2829 outputDesc,
2830 armnnUtils::QuantizedVector<int16_t>(
2831 {
2832 -0.00396806f, 0.02935200f, -0.00279226f, 0.01599770f,
2833 -0.00835576f, -0.02117790f, 0.02835120f, -0.01145970f,
2834 0.00907307f, -0.02440040f, -0.01521910f, -0.02590630f,
2835 0.00914318f, 0.00415118f, 0.01714700f, 0.01342030f,
2836 -0.01386900f, 0.02872680f, -0.00334693f, 0.00733398f,
2837 -0.02879260f, -0.01869260f, 0.01936620f, -0.01154370f,
2838 0.00422612f, -0.03452320f, 0.00223253f, -0.00957321f,
2839 0.02106240f, 0.01333100f, 0.01509540f, 0.02168000f
2840 },
2841 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002842
2843 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
Finn Williamsc43de6a2020-08-27 11:13:25 +01002844 workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002845}
2846
2847LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
2848 armnn::IWorkloadFactory& workloadFactory,
Finn Williamsc43de6a2020-08-27 11:13:25 +01002849 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2850 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002851{
2852 const float qScale = 1.0f;
2853 const int32_t qOffset = 0;
2854
Derek Lambertif90c56d2020-01-10 17:14:08 +00002855 const armnn::DataType datatype = armnn::DataType::QSymmS16; // datatype & constants set to QSymm16
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002856
2857 armnn::TensorInfo inputDesc({2, 2}, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002858 boost::multi_array<int16_t , 2> input =
2859 MakeTensor<int16_t , 2>(inputDesc,
2860 armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002861
2862 armnn::TensorInfo outputDesc({2, 4}, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002863 boost::multi_array<int16_t, 2> expectedOutput =
2864 MakeTensor<int16_t, 2>(
2865 outputDesc,
2866 armnnUtils::QuantizedVector<int16_t>(
2867 {
2868 -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
2869 -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
2870 },
2871 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002872
2873 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
Finn Williamsc43de6a2020-08-27 11:13:25 +01002874 workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, datatype);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002875}
2876
2877//
2878// QuantizedLstm
2879//
2880
2881LayerTestResult<uint8_t, 2> QuantizedLstmTest(
2882 armnn::IWorkloadFactory& workloadFactory,
Finn Williamsc43de6a2020-08-27 11:13:25 +01002883 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2884 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002885{
Derek Lambertif90c56d2020-01-10 17:14:08 +00002886 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002887 boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, std::vector<uint8_t>(
2888 {166, 179, 50, 150}));
2889
Derek Lambertif90c56d2020-01-10 17:14:08 +00002890 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002891 boost::multi_array<uint8_t, 2> expectedOutput = MakeTensor<uint8_t, 2>(outputDesc, std::vector<uint8_t>(
2892 {140, 151, 146, 112, 136, 156, 142, 112 }));
2893
Finn Williamsc43de6a2020-08-27 11:13:25 +01002894 return QuantizedLstmTestImpl(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002895}
James Conroy4f1f8992020-04-29 20:01:10 +01002896
2897// QLSTM
2898LayerTestResult<int8_t, 2> QLstmTest(
Finn Williamsc43de6a2020-08-27 11:13:25 +01002899 armnn::IWorkloadFactory& workloadFactory,
2900 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2901 const armnn::ITensorHandleFactory& tensorHandleFactory)
James Conroy4f1f8992020-04-29 20:01:10 +01002902{
2903 armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8);
2904 boost::multi_array<int8_t, 2> input = MakeTensor<int8_t, 2>(inputDesc, std::vector<int8_t>(
2905 {90, 102, 13, 26, 38, 102, 13, 26, 51, 64}));
2906
2907 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmS8);
2908 boost::multi_array<int8_t, 2> expectedOutput = MakeTensor<int8_t, 2>(outputDesc, std::vector<int8_t>(
2909 {-15, 21, 14, 20, -15, 15, 5, 27}));
2910
Finn Williamsc43de6a2020-08-27 11:13:25 +01002911 return QLstmTestImpl(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
James Conroy4f1f8992020-04-29 20:01:10 +01002912}
James Conroyb22a75e2020-06-08 14:53:10 +01002913
2914LayerTestResult<int8_t, 2> QLstmTest1(
Finn Williamsc43de6a2020-08-27 11:13:25 +01002915 armnn::IWorkloadFactory& workloadFactory,
2916 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2917 const armnn::ITensorHandleFactory& tensorHandleFactory)
James Conroyb22a75e2020-06-08 14:53:10 +01002918{
2919 armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8);
2920 boost::multi_array<int8_t, 2> input = MakeTensor<int8_t, 2>(inputDesc, std::vector<int8_t>(
2921 {90, 102, 13, 26, 38, 102, 13, 26, 51, 64}));
2922
2923 armnn::TensorInfo outputDesc({2, 3}, armnn::DataType::QAsymmS8);
2924 boost::multi_array<int8_t, 2> expectedOutput = MakeTensor<int8_t, 2>(outputDesc, std::vector<int8_t>(
2925 {127, 127, -108, -67, 127, 127}));
2926
Finn Williamsc43de6a2020-08-27 11:13:25 +01002927 return QLstmTestImpl1(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
James Conroyb22a75e2020-06-08 14:53:10 +01002928}
2929
2930LayerTestResult<int8_t, 2> QLstmTest2(
Finn Williamsc43de6a2020-08-27 11:13:25 +01002931 armnn::IWorkloadFactory& workloadFactory,
2932 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2933 const armnn::ITensorHandleFactory& tensorHandleFactory)
James Conroyb22a75e2020-06-08 14:53:10 +01002934{
2935 armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8);
2936 boost::multi_array<int8_t, 2> input = MakeTensor<int8_t, 2>(inputDesc, std::vector<int8_t>(
2937 {90, 102, 13, 26, 38, 102, 13, 26, 51, 64}));
2938
2939 armnn::TensorInfo outputDesc({2, 3}, armnn::DataType::QAsymmS8);
2940 boost::multi_array<int8_t, 2> expectedOutput = MakeTensor<int8_t, 2>(outputDesc, std::vector<int8_t>(
2941 {127, 127, 127, -128, 127, 127}));
2942
Finn Williamsc43de6a2020-08-27 11:13:25 +01002943 return QLstmTestImpl2(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
James Conroyb22a75e2020-06-08 14:53:10 +01002944}