//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "../DriverTestHelpers.hpp"
#include "../TestTensor.hpp"

#include "../1.3/HalPolicy.hpp"

#include <armnn/utility/IgnoreUnused.hpp>

#include <boost/array.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
#include <boost/math/special_functions/relative_difference.hpp>

#include <cmath>

BOOST_AUTO_TEST_SUITE(QLSTMTests)

using ArmnnDriver   = armnn_driver::ArmnnDriver;
using DriverOptions = armnn_driver::DriverOptions;

using namespace driverTestHelpers;
using namespace android::hardware;

using HalPolicy = hal_1_3::HalPolicy;

namespace
{

template<typename T>
RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
{
    DataLocation inputLocation = {};
    inputLocation.poolIndex    = poolIndex;
    inputLocation.offset       = 0;
    inputLocation.length       = value.size() * sizeof(T);
    RequestArgument inputRequestArgument = {};
    inputRequestArgument.location   = inputLocation;
    inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
    return inputRequestArgument;
}
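
// A minimal usage sketch (illustrative values, not taken from the tests below):
//
//     std::vector<int8_t> values = { 1, 2, 3 };
//     RequestArgument arg = CreateRequestArgument<int8_t>(values, 0);
//
// 'arg' then reads 3 * sizeof(int8_t) bytes starting at offset 0 of request pool 0.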

// Returns true if the relative difference between two float values is less than the given
// tolerance. This is used because the floating point comparison tolerance set on each
// BOOST_AUTO_TEST_CASE does not work for these tests.
bool TolerantCompareEqual(float a, float b, float tolerance = 1.0f)
{
    float rd;
    if (a == 0.0f)
    {
        rd = std::fabs(b);
    }
    else if (b == 0.0f)
    {
        rd = std::fabs(a);
    }
    else
    {
        rd = boost::math::relative_difference(a, b);
    }
    return rd < tolerance;
}
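
// Illustrative behaviour with the default tolerance of 1.0f (example values, not taken from
// the tests below): TolerantCompareEqual(100.0f, 101.0f) passes, as the relative difference
// is roughly 0.01, while TolerantCompareEqual(1.0f, -1.0f) fails with a relative difference
// of 2.0. Zero operands are special-cased above, so TolerantCompareEqual(0.0f, 0.5f)
// compares the absolute value 0.5 against the tolerance and passes.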

// Helper function to create an OperandLifeTime::NO_VALUE for testing.
// To be used on optional input operands that have no values - these are valid and should be tested.
HalPolicy::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
{
    // Only create a NO_VALUE for optional operands that have no elements
    if (dimensions.size() == 0 || dimensions[0] == 0)
    {
        return HalPolicy::OperandLifeTime::NO_VALUE;
    }
    return HalPolicy::OperandLifeTime::CONSTANT_COPY;
}
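
// Usage sketch (illustrative): an omitted optional operand described by dimensions {0} maps
// to NO_VALUE, while a populated operand described by dimensions {4} maps to CONSTANT_COPY:
//
//     CreateNoValueLifeTime(hidl_vec<uint32_t>{0}); // returns NO_VALUE
//     CreateNoValueLifeTime(hidl_vec<uint32_t>{4}); // returns CONSTANT_COPY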

void ExecuteModel(const armnn_driver::hal_1_3::HalPolicy::Model& model,
                  armnn_driver::ArmnnDriver& driver,
                  const V1_0::Request& request)
{
    android::sp<V1_3::IPreparedModel> preparedModel = PrepareModel_1_3(model, driver);
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }
}

#ifndef ARMCOMPUTECL_ENABLED
static const boost::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
#else
static const boost::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::CpuAcc }};
#endif
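
// BOOST_DATA_TEST_CASE (used at the bottom of this file) instantiates each test case once
// per element of COMPUTE_DEVICES, exposing the current device through the implicit `sample`
// parameter.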

// Add our own tests here, since we skip the QLSTM tests supplied by Google (they use
// non-const weights).
void QLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                   const std::vector<int8_t>& inputValue,
                   const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                   const std::vector<int8_t>& inputToInputWeightsValue,
                   const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                   const std::vector<int8_t>& inputToForgetWeightsValue,
                   const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                   const std::vector<int8_t>& inputToCellWeightsValue,
                   const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                   const std::vector<int8_t>& inputToOutputWeightsValue,
                   const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                   const std::vector<int8_t>& recurrentToInputWeightsValue,
                   const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                   const std::vector<int8_t>& recurrentToForgetWeightsValue,
                   const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                   const std::vector<int8_t>& recurrentToCellWeightsValue,
                   const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                   const std::vector<int8_t>& recurrentToOutputWeightsValue,
                   const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
                   const std::vector<int16_t>& cellToInputWeightsValue,
                   const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
                   const std::vector<int16_t>& cellToForgetWeightsValue,
                   const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
                   const std::vector<int16_t>& cellToOutputWeightsValue,
                   const hidl_vec<uint32_t>& inputGateBiasDimensions,
                   const std::vector<int32_t>& inputGateBiasValue,
                   const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                   const std::vector<int32_t>& forgetGateBiasValue,
                   const hidl_vec<uint32_t>& cellBiasDimensions,
                   const std::vector<int32_t>& cellBiasValue,
                   const hidl_vec<uint32_t>& outputGateBiasDimensions,
                   const std::vector<int32_t>& outputGateBiasValue,
                   const hidl_vec<uint32_t>& projectionWeightsDimensions,
                   const std::vector<int8_t>& projectionWeightsValue,
                   const hidl_vec<uint32_t>& projectionBiasDimensions,
                   const std::vector<int32_t>& projectionBiasValue,
                   const hidl_vec<uint32_t>& outputPreviousTimeStepInDimensions,
                   const std::vector<int8_t>& outputPreviousTimeStepInValue,
                   const hidl_vec<uint32_t>& cellStatePreviousTimeStepInDimensions,
                   const std::vector<int16_t>& cellStatePreviousTimeStepInValue,
                   const hidl_vec<uint32_t>& inputLayerNormWeightsDimensions,
                   const std::vector<int16_t>& inputLayerNormWeightsValue,
                   const hidl_vec<uint32_t>& forgetLayerNormWeightsDimensions,
                   const std::vector<int16_t>& forgetLayerNormWeightsValue,
                   const hidl_vec<uint32_t>& cellLayerNormWeightsDimensions,
                   const std::vector<int16_t>& cellLayerNormWeightsValue,
                   const hidl_vec<uint32_t>& outputLayerNormWeightsDimensions,
                   const std::vector<int16_t>& outputLayerNormWeightsValue,
                   float cellClipValue,
                   float projectionClipValue,
                   float matMulInputGateValue,
                   float matMulForgetGateValue,
                   float matMulCellGateValue,
                   float matMulOutputGateValue,
                   int32_t projInputZeroPointValue,
                   float projInputScaleValue,
                   const hidl_vec<uint32_t>& outputStateOutDimensions,
                   const std::vector<int8_t>& outputStateOutValue,
                   const hidl_vec<uint32_t>& cellStateOutDimensions,
                   const std::vector<int16_t>& cellStateOutValue,
                   const hidl_vec<uint32_t>& outputDimensions,
                   const std::vector<int8_t>& outputValue,
                   armnn::Compute compute)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
    HalPolicy::Model model = {};

    // Scale/Offset quantization info
    float inputScale    = 0.0078125f;
    int32_t inputOffset = 0;

    int32_t hiddenStateZeroPoint = 0;
    float hiddenStateScale       = 0.007f;

    float outputScale    = hiddenStateScale;
    int32_t outputOffset = hiddenStateZeroPoint;

    float cellStateScale    = 3.05176e-05f;
    float cellWeightsScale  = 1.0f;
    int32_t cellStateOffset = 0;

    float weightsScale    = 0.00784314f;
    int32_t weightsOffset = 0;

    float layerNormScale    = 3.05182e-05f;
    int32_t layerNormOffset = 0;

    float biasScale    = layerNormScale / 1024;
    int32_t biasOffset = 0;

    // outputScale and outputOffset are currently unused below; silence compiler warnings
    armnn::IgnoreUnused(outputScale, outputOffset);
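
    // Note: cellStateScale and layerNormScale are both approximately 2^-15 (1/32768, about
    // 3.0518e-05), so the QUANT16_SYMM cell state and layer norm tensors are effectively
    // Q0.15 fixed-point values.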

    // Inputs:
    // 00: The input to the LSTM cell. Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED Shape: [batchSize, inputSize]
    AddInputOperand<HalPolicy>(model,
                               inputDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                               inputScale,
                               inputOffset);

    // 01: The input-to-input weights. Optional. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize]
    AddTensorOperand<HalPolicy>(model,
                                inputToInputWeightsDimensions,
                                inputToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(inputToInputWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 02: The input-to-forget weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize]
    AddTensorOperand<HalPolicy>(model,
                                inputToForgetWeightsDimensions,
                                inputToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(inputToForgetWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 03: The input-to-cell weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize]
    AddTensorOperand<HalPolicy>(model,
                                inputToCellWeightsDimensions,
                                inputToCellWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(inputToCellWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 04: The input-to-output weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize]
    AddTensorOperand<HalPolicy>(model,
                                inputToOutputWeightsDimensions,
                                inputToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(inputToOutputWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 05: The recurrent-to-input weights. Optional. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM
    //     Shape: [numUnits, outputSize]
    AddTensorOperand<HalPolicy>(model,
                                recurrentToInputWeightsDimensions,
                                recurrentToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(recurrentToInputWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 06: The recurrent-to-forget weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, outputSize]
    AddTensorOperand<HalPolicy>(model,
                                recurrentToForgetWeightsDimensions,
                                recurrentToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(recurrentToForgetWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 07: The recurrent-to-cell weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, outputSize]
    AddTensorOperand<HalPolicy>(model,
                                recurrentToCellWeightsDimensions,
                                recurrentToCellWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(recurrentToCellWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 08: The recurrent-to-output weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, outputSize]
    AddTensorOperand<HalPolicy>(model,
                                recurrentToOutputWeightsDimensions,
                                recurrentToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(recurrentToOutputWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 09: The cell-to-input weights (for peephole). Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM
    //     Shape: [numUnits]
    AddTensorOperand<HalPolicy>(model,
                                cellToInputWeightsDimensions,
                                cellToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                CreateNoValueLifeTime(cellToInputWeightsDimensions),
                                cellWeightsScale,
                                weightsOffset);

    // 10: The cell-to-forget weights (for peephole). Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM
    //     Shape: [numUnits]
    AddTensorOperand<HalPolicy>(model,
                                cellToForgetWeightsDimensions,
                                cellToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                CreateNoValueLifeTime(cellToForgetWeightsDimensions),
                                cellWeightsScale,
                                weightsOffset);

    // 11: The cell-to-output weights (for peephole). Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM
    //     Shape: [numUnits]
    AddTensorOperand<HalPolicy>(model,
                                cellToOutputWeightsDimensions,
                                cellToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                CreateNoValueLifeTime(cellToOutputWeightsDimensions),
                                cellWeightsScale,
                                weightsOffset);

    // 12: The input gate bias. Quantized with scale being the product of input and weights scales
    //     and zeroPoint equal to 0. Optional. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits]
    AddTensorOperand<HalPolicy>(model,
                                inputGateBiasDimensions,
                                inputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(inputGateBiasDimensions),
                                biasScale,
                                biasOffset);

    // 13: The forget gate bias. Quantized with scale being the product of input and weights scales
    //     and zeroPoint equal to 0. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits]
    AddTensorOperand<HalPolicy>(model,
                                forgetGateBiasDimensions,
                                forgetGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(forgetGateBiasDimensions),
                                biasScale,
                                biasOffset);

    // 14: The cell bias. Quantized with scale being the product of input and weights scales and zeroPoint equal to 0.
    //     Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits]
    AddTensorOperand<HalPolicy>(model,
                                cellBiasDimensions,
                                cellBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(cellBiasDimensions),
                                biasScale,
                                biasOffset);

    // 15: The output gate bias. Quantized with scale being the product of input and weights scales
    //     and zeroPoint equal to 0. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits]
    AddTensorOperand<HalPolicy>(model,
                                outputGateBiasDimensions,
                                outputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(outputGateBiasDimensions),
                                biasScale,
                                biasOffset);

    // 16: The projection weights. Optional. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [outputSize, numUnits]
    AddTensorOperand<HalPolicy>(model,
                                projectionWeightsDimensions,
                                projectionWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(projectionWeightsDimensions),
                                0.00392157f,
                                weightsOffset);

    // 17: The projection bias. Quantized with scale being the product of input and weights scales
    //     and zeroPoint equal to 0. Optional. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [outputSize]
    AddTensorOperand<HalPolicy>(model,
                                projectionBiasDimensions,
                                projectionBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(projectionBiasDimensions),
                                0.0f,
                                biasOffset);

    // 18: The output from the previous time step. Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
    //     Shape: [batchSize, outputSize]
    AddInputOperand<HalPolicy>(model,
                               outputPreviousTimeStepInDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                               cellStateScale,
                               inputOffset);

    // 19: The cell state from the previous time step. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM
    //     Shape: [batchSize, numUnits]
    AddInputOperand<HalPolicy>(model,
                               cellStatePreviousTimeStepInDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                               cellStateScale,
                               cellStateOffset);

    // If any of the layer normalization tensors have a value, all of them are added to the model
    if (!inputLayerNormWeightsValue.empty() ||
        !forgetLayerNormWeightsValue.empty() ||
        !cellLayerNormWeightsValue.empty() ||
        !outputLayerNormWeightsValue.empty())
    {
        // Normalization:
        // 20: The input layer normalization weights. Used to rescale normalized inputs to activation at input gate.
        //     Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits]
        AddTensorOperand<HalPolicy>(model,
                                    inputLayerNormWeightsDimensions,
                                    inputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                    CreateNoValueLifeTime(inputLayerNormWeightsDimensions),
                                    layerNormScale,
                                    layerNormOffset);

        // 21: The forget layer normalization weights. Used to rescale normalized inputs to activation at forget gate.
        //     Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits]
        AddTensorOperand<HalPolicy>(model,
                                    forgetLayerNormWeightsDimensions,
                                    forgetLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                    CreateNoValueLifeTime(forgetLayerNormWeightsDimensions),
                                    layerNormScale,
                                    layerNormOffset);

        // 22: The cell layer normalization weights. Used to rescale normalized inputs to activation at cell gate.
        //     Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits]
        AddTensorOperand<HalPolicy>(model,
                                    cellLayerNormWeightsDimensions,
                                    cellLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                    CreateNoValueLifeTime(cellLayerNormWeightsDimensions),
                                    layerNormScale,
                                    layerNormOffset);

        // 23: The output layer normalization weights. Used to rescale normalized inputs to activation at output gate.
        //     Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits]
        AddTensorOperand<HalPolicy>(model,
                                    outputLayerNormWeightsDimensions,
                                    outputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                    CreateNoValueLifeTime(outputLayerNormWeightsDimensions),
                                    layerNormScale,
                                    layerNormOffset);
    }
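
    // Note: the tests below always provide at least the forget, cell and output layer norm
    // weights, so this branch is always taken and operand indices 20-23 stay valid for the
    // operation wiring further down.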

    // Constant scalar values
    // 24: The cell clip. If provided the cell state is clipped by this value prior to the cell output activation.
    //     Optional. Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, cellClipValue);

    // 25: The projection clip. If provided and projection is enabled, this is used for clipping the projected values.
    //     Optional. Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, projectionClipValue);

    // 26: The scale of the intermediate result of matmul, i.e. input to layer normalization, at input gate.
    //     Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, matMulInputGateValue);

    // 27: The scale of the intermediate result of matmul, i.e. input to layer normalization, at forget gate.
    //     Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, matMulForgetGateValue);

    // 28: The scale of the intermediate result of matmul, i.e. input to layer normalization, at cell gate.
    //     Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, matMulCellGateValue);

    // 29: The scale of the intermediate result of matmul, i.e. input to layer normalization, at output gate.
    //     Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, matMulOutputGateValue);

    // 30: The zero point of the hidden state, i.e. input to projection. Type: ANEURALNETWORKS_INT32.
    AddIntOperand<HalPolicy>(model, projInputZeroPointValue);

    // 31: The scale of the hidden state, i.e. input to projection. Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, projInputScaleValue);

    // Outputs:
    // 0: The output state (out). Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED Shape: [batchSize, outputSize]
    AddOutputOperand<HalPolicy>(model,
                                outputStateOutDimensions,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                                cellStateScale,
                                cellStateOffset);

    // 1: The cell state (out). Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [batchSize, numUnits]
    AddOutputOperand<HalPolicy>(model,
                                cellStateOutDimensions,
                                HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                cellStateScale,
                                cellStateOffset);

    // 2: The output. This is effectively the same as the current "output state (out)" value.
    //    Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED Shape: [batchSize, outputSize]
    AddOutputOperand<HalPolicy>(model,
                                outputDimensions,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                                cellStateScale,
                                cellStateOffset);

    // make the QUANTIZED_LSTM operation
    model.main.operations.resize(1);
    model.main.operations[0].type = HalPolicy::OperationType::QUANTIZED_LSTM;

    model.main.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                                                           12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
                                                           24, 25, 26, 27, 28, 29, 30, 31 };
    model.main.operations[0].outputs = hidl_vec<uint32_t> { 32, 33, 34 };
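
    // Operands 0-31 were added above in NNAPI QLSTM signature order; the three outputs take
    // the next consecutive indices (32-34) in the order they were added to the model.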

    // define the input values
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    inputArguments[0] = CreateRequestArgument<int8_t>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<int8_t>(outputPreviousTimeStepInValue, 1);
    inputArguments[2] = CreateRequestArgument<int16_t>(cellStatePreviousTimeStepInValue, 2);

    // define the expected output values
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(3);

    outputArguments[0] = CreateRequestArgument<int8_t>(outputStateOutValue, 3);
    outputArguments[1] = CreateRequestArgument<int16_t>(cellStateOutValue, 4);
    outputArguments[2] = CreateRequestArgument<int8_t>(outputValue, 5);

    android::hardware::neuralnetworks::V1_0::Request request = {};
    request.inputs  = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(outputPreviousTimeStepInValue.size(), request, outputPreviousTimeStepInValue.data());
    AddPoolAndSetData(cellStatePreviousTimeStepInValue.size(), request, cellStatePreviousTimeStepInValue.data());
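
    // The pools must be added in the same order as the poolIndex values passed to
    // CreateRequestArgument above: pools 0-2 back the inputs, pools 3-5 receive the outputs.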

    // add memory for the outputs
    android::sp<IMemory> outputStateOutMemory = AddPoolAndGetData<int8_t>(outputStateOutValue.size(), request);
    int8_t* outputStateOutData = static_cast<int8_t*>(static_cast<void*>(outputStateOutMemory->getPointer()));

    android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData<int16_t>(cellStateOutValue.size(), request);
    int16_t* cellStateOutData = static_cast<int16_t*>(static_cast<void*>(cellStateOutMemory->getPointer()));

    android::sp<IMemory> outputMemory = AddPoolAndGetData<int8_t>(outputValue.size(), request);
    int8_t* outputData = static_cast<int8_t*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    ExecuteModel(model, *driver, request);

    // check the results
    for (size_t i = 0; i < outputStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
                   "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
    }

    // CELL STATE OUTPUT does not match currently: IVGCVSW-4860 Verify remaining VTS tests (2) for QLSTM.
    // Commented out for now.
    // for (size_t i = 0; i < cellStateOutValue.size(); ++i)
    // {
    //     BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]),
    //                "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
    // }

    for (size_t i = 0; i < outputValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
    }
}

void QLstmWithProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/specs/V1_3/qlstm_projection.mod.py
    // with values from android/frameworks/ml/nn/runtime/test/generated/spec_V1_3/qlstm_projection.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of SUBGRAPH_INPUT tensors).

    uint32_t batchSize  = 2;
    uint32_t inputSize  = 5;
    uint32_t outputSize = 3;
    uint32_t numUnits   = 4;

    // Inputs:
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<int8_t> inputValue{ 90, 102, 13, 26, 38, 102, 13, 26, 51, 64 };

    hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToInputWeightsValue{ 64, 77, 89, -102, -115,
                                                  13, 25, 38, -51, 64,
                                                  -102, 89, -77, 64, -51,
                                                  -64, -51, -38, -25, -13 };

    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToForgetWeightsValue{ -77, -13, 38, 25, 115,
                                                   -64, -25, -51, 38, -102,
                                                   -51, 38, -64, -51, -77,
                                                   38, -51, -77, -64, -64 };

    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToCellWeightsValue{ -51, -38, -25, -13, -64,
                                                 64, -25, -38, -25, -77,
                                                 77, -13, -51, -38, -89,
                                                 89, -115, -64, 102, 77 };

    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToOutputWeightsValue{ -102, -51, -25, -115, -13,
                                                   -89, 38, -38, -102, -25,
                                                   77, -25, 51, -89, -38,
                                                   -64, 13, 64, -77, -51 };

    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToInputWeightsValue{ -25, -38, 51, 13, -64, 115, -25, -38, -89, 6, -25, -77 };

    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToForgetWeightsValue{ -64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25 };

    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToCellWeightsValue{ -38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25 };

    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToOutputWeightsValue{ 38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25 };

    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<int16_t> cellToInputWeightsValue;

    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<int16_t> cellToForgetWeightsValue;

    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<int16_t> cellToOutputWeightsValue;

    hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
    std::vector<int32_t> inputGateBiasValue{ 644245, 3221226, 4724464, 8160438 };

    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<int32_t> forgetGateBiasValue{ 2147484, -6442451, -4294968, 2147484 };

    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<int32_t> cellBiasValue{ -1073742, 15461883, 5368709, 1717987 };

    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<int32_t> outputGateBiasValue{ 1073742, -214748, 4294968, 2147484 };

    hidl_vec<uint32_t> projectionWeightsDimensions{outputSize, numUnits};
    std::vector<int8_t> projectionWeightsValue{ -25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51 };

    hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
    std::vector<int32_t> projectionBiasValue{ 0, 0, 0 };

    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<int8_t> outputStateInValue{ 0, 0, 0, 0, 0, 0 };

    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<int16_t> cellStateInValue{ 0, 0, 0, 0, 0, 0, 0, 0 };

    // Normalization:
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> inputLayerNormWeightsValue{ 3277, 6553, 9830, 16384 };

    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> forgetLayerNormWeightsValue{ 6553, 6553, 13107, 9830 };

    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> cellLayerNormWeightsValue{ 22937, 6553, 9830, 26214 };

    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> outputLayerNormWeightsValue{ 19660, 6553, 6553, 16384 };

    float cellClipValue           = 0.0f;
    float projectionClipValue     = 0.0f;
    float inputIntermediateScale  = 0.007059f;
    float forgetIntermediateScale = 0.007812f;
    float cellIntermediateScale   = 0.007059f;
    float outputIntermediateScale = 0.007812f;
    int32_t hiddenStateZeroPoint  = 0;
    float hiddenStateScale        = 0.007f;

    // Outputs:
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<int8_t> outputStateOutValue{ 127, 127, -108, -67, 127, 127 };

    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<int16_t> cellStateOutValue{ -14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939 };

    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<int8_t> outputValue{ 127, 127, -108, -67, 127, 127 };

    QLstmTestImpl(inputDimensions, inputValue,
                  inputToInputWeightsDimensions, inputToInputWeightsValue,
                  inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                  inputToCellWeightsDimensions, inputToCellWeightsValue,
                  inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                  recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                  recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                  recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                  recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                  cellToInputWeightsDimensions, cellToInputWeightsValue,
                  cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                  cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                  inputGateBiasDimensions, inputGateBiasValue,
                  forgetGateBiasDimensions, forgetGateBiasValue,
                  cellBiasDimensions, cellBiasValue,
                  outputGateBiasDimensions, outputGateBiasValue,
                  projectionWeightsDimensions, projectionWeightsValue,
                  projectionBiasDimensions, projectionBiasValue,
                  outputStateInDimensions, outputStateInValue,
                  cellStateInDimensions, cellStateInValue,
                  inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
                  forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
                  cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
                  outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
                  cellClipValue,
                  projectionClipValue,
                  inputIntermediateScale,
                  forgetIntermediateScale,
                  cellIntermediateScale,
                  outputIntermediateScale,
                  hiddenStateZeroPoint,
                  hiddenStateScale,
                  outputStateOutDimensions, outputStateOutValue,
                  cellStateOutDimensions, cellStateOutValue,
                  outputDimensions, outputValue,
                  compute);
}

void QLstmWithNoProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py
    // with values from android/frameworks/ml/nn/runtime/test/generated/spec_V1_3/qlstm_noprojection.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of SUBGRAPH_INPUT tensors).

    uint32_t batchSize  = 2;
    uint32_t inputSize  = 5;
    uint32_t outputSize = 4;
    uint32_t numUnits   = 4;

    // Inputs:
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<int8_t> inputValue{ 90, 102, 13, 26, 38, 102, 13, 26, 51, 64 };

    hidl_vec<uint32_t> inputToInputWeightsDimensions{0, 0};
    std::vector<int8_t> inputToInputWeightsValue;

    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToForgetWeightsValue{ -77, -13, 38, 25, 115,
                                                   -64, -25, -51, 38, -102,
                                                   -51, 38, -64, -51, -77,
                                                   38, -51, -77, -64, -64 };

    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToCellWeightsValue{ -51, -38, -25, -13, -64,
                                                 64, -25, -38, -25, -77,
                                                 77, -13, -51, -38, -89,
                                                 89, -115, -64, 102, 77 };

    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToOutputWeightsValue{ -102, -51, -25, -115, -13,
                                                   -89, 38, -38, -102, -25,
                                                   77, -25, 51, -89, -38,
                                                   -64, 13, 64, -77, -51 };

    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0, 0};
    std::vector<int8_t> recurrentToInputWeightsValue;

    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToForgetWeightsValue{ -64, -38, -64, -25,
                                                       77, 51, 115, 38,
                                                       -13, 25, 64, 25,
                                                       25, 38, -13, 51 };

    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToCellWeightsValue{ -38, 25, 13, -38,
                                                     102, -10, -25, 38,
                                                     102, -77, -13, 25,
                                                     38, -13, 25, 64 };

    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToOutputWeightsValue{ 38, -13, 13, -25,
                                                       -64, -89, -25, -77,
                                                       -13, -51, -89, -25,
                                                       13, 64, 25, -38 };

    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<int16_t> cellToInputWeightsValue;

    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<int16_t> cellToForgetWeightsValue;

    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<int16_t> cellToOutputWeightsValue;

    hidl_vec<uint32_t> inputGateBiasDimensions{0};
    std::vector<int32_t> inputGateBiasValue;

    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<int32_t> forgetGateBiasValue{ 2147484, -6442451, -4294968, 2147484 };

    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<int32_t> cellBiasValue{ -1073742, 15461883, 5368709, 1717987 };

    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<int32_t> outputGateBiasValue{ 1073742, -214748, 4294968, 2147484 };

    hidl_vec<uint32_t> projectionWeightsDimensions{0, 0};
    std::vector<int8_t> projectionWeightsValue;

    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<int32_t> projectionBiasValue;

    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<int8_t> outputStateInValue{ 0, 0, 0, 0, 0, 0, 0, 0 };

    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<int16_t> cellStateInValue{ 0, 0, 0, 0, 0, 0, 0, 0 };

    // Normalization:
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
    std::vector<int16_t> inputLayerNormWeightsValue;

    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> forgetLayerNormWeightsValue{ 6553, 6553, 13107, 9830 };

    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> cellLayerNormWeightsValue{ 22937, 6553, 9830, 26214 };

    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> outputLayerNormWeightsValue{ 19660, 6553, 6553, 16384 };

    float cellClipValue           = 0.0f;
    float projectionClipValue     = 0.0f;
    float inputIntermediateScale  = 0.007059f;
    float forgetIntermediateScale = 0.007812f;
    float cellIntermediateScale   = 0.007059f;
    float outputIntermediateScale = 0.007812f;
    int32_t hiddenStateZeroPoint  = 0;
    float hiddenStateScale        = 0.007f;

    // Outputs:
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<int8_t> outputStateOutValue{ -15, 21, 14, 20, -15, 15, 5, 27 };

    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<int16_t> cellStateOutValue{ -11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149 };

    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<int8_t> outputValue{ -15, 21, 14, 20, -15, 15, 5, 27 };

    QLstmTestImpl(inputDimensions, inputValue,
                  inputToInputWeightsDimensions, inputToInputWeightsValue,
                  inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                  inputToCellWeightsDimensions, inputToCellWeightsValue,
                  inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                  recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                  recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                  recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                  recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                  cellToInputWeightsDimensions, cellToInputWeightsValue,
                  cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                  cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                  inputGateBiasDimensions, inputGateBiasValue,
                  forgetGateBiasDimensions, forgetGateBiasValue,
                  cellBiasDimensions, cellBiasValue,
                  outputGateBiasDimensions, outputGateBiasValue,
                  projectionWeightsDimensions, projectionWeightsValue,
                  projectionBiasDimensions, projectionBiasValue,
                  outputStateInDimensions, outputStateInValue,
                  cellStateInDimensions, cellStateInValue,
                  inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
                  forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
                  cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
                  outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
                  cellClipValue,
                  projectionClipValue,
                  inputIntermediateScale,
                  forgetIntermediateScale,
                  cellIntermediateScale,
                  outputIntermediateScale,
                  hiddenStateZeroPoint,
                  hiddenStateScale,
                  outputStateOutDimensions, outputStateOutValue,
                  cellStateOutDimensions, cellStateOutValue,
                  outputDimensions, outputValue,
                  compute);
}

void DynamicOutputQLstmWithNoProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py
    // with values from android/frameworks/ml/nn/runtime/test/generated/spec_V1_3/qlstm_noprojection.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of SUBGRAPH_INPUT tensors)
    // and made cellStateOutput dynamic.

    uint32_t batchSize  = 2;
    uint32_t inputSize  = 5;
    uint32_t outputSize = 4;
    uint32_t numUnits   = 4;

    // Inputs:
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<int8_t> inputValue{ 90, 102, 13, 26, 38, 102, 13, 26, 51, 64 };

    hidl_vec<uint32_t> inputToInputWeightsDimensions{0, 0};
    std::vector<int8_t> inputToInputWeightsValue;

    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToForgetWeightsValue{ -77, -13, 38, 25, 115,
                                                   -64, -25, -51, 38, -102,
                                                   -51, 38, -64, -51, -77,
                                                   38, -51, -77, -64, -64 };

    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToCellWeightsValue{ -51, -38, -25, -13, -64,
                                                 64, -25, -38, -25, -77,
                                                 77, -13, -51, -38, -89,
                                                 89, -115, -64, 102, 77 };

    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToOutputWeightsValue{ -102, -51, -25, -115, -13,
                                                   -89, 38, -38, -102, -25,
                                                   77, -25, 51, -89, -38,
                                                   -64, 13, 64, -77, -51 };

    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0, 0};
    std::vector<int8_t> recurrentToInputWeightsValue;

    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToForgetWeightsValue{ -64, -38, -64, -25,
                                                       77, 51, 115, 38,
                                                       -13, 25, 64, 25,
                                                       25, 38, -13, 51 };

    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToCellWeightsValue{ -38, 25, 13, -38,
                                                     102, -10, -25, 38,
                                                     102, -77, -13, 25,
                                                     38, -13, 25, 64 };

    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToOutputWeightsValue{ 38, -13, 13, -25,
                                                       -64, -89, -25, -77,
                                                       -13, -51, -89, -25,
                                                       13, 64, 25, -38 };

    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<int16_t> cellToInputWeightsValue;

    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<int16_t> cellToForgetWeightsValue;

    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<int16_t> cellToOutputWeightsValue;

    hidl_vec<uint32_t> inputGateBiasDimensions{0};
    std::vector<int32_t> inputGateBiasValue;

    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<int32_t> forgetGateBiasValue{ 2147484, -6442451, -4294968, 2147484 };

    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<int32_t> cellBiasValue{ -1073742, 15461883, 5368709, 1717987 };

    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<int32_t> outputGateBiasValue{ 1073742, -214748, 4294968, 2147484 };

    hidl_vec<uint32_t> projectionWeightsDimensions{0, 0};
    std::vector<int8_t> projectionWeightsValue;

    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<int32_t> projectionBiasValue;

    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<int8_t> outputStateInValue{ 0, 0, 0, 0, 0, 0, 0, 0 };

    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<int16_t> cellStateInValue{ 0, 0, 0, 0, 0, 0, 0, 0 };

    // Normalization:
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
    std::vector<int16_t> inputLayerNormWeightsValue;

    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> forgetLayerNormWeightsValue{ 6553, 6553, 13107, 9830 };

    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> cellLayerNormWeightsValue{ 22937, 6553, 9830, 26214 };

    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> outputLayerNormWeightsValue{ 19660, 6553, 6553, 16384 };

    float cellClipValue           = 0.0f;
    float projectionClipValue     = 0.0f;
    float inputIntermediateScale  = 0.007059f;
    float forgetIntermediateScale = 0.007812f;
    float cellIntermediateScale   = 0.007059f;
    float outputIntermediateScale = 0.007812f;
    int32_t hiddenStateZeroPoint  = 0;
    float hiddenStateScale        = 0.007f;

    // Outputs:
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<int8_t> outputStateOutValue{ -15, 21, 14, 20, -15, 15, 5, 27 };

    // The cell state output shape is left empty to exercise dynamic output support
    hidl_vec<uint32_t> cellStateOutDimensions{};
    std::vector<int16_t> cellStateOutValue{ -11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149 };

    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<int8_t> outputValue{ -15, 21, 14, 20, -15, 15, 5, 27 };

    QLstmTestImpl(inputDimensions, inputValue,
                  inputToInputWeightsDimensions, inputToInputWeightsValue,
                  inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                  inputToCellWeightsDimensions, inputToCellWeightsValue,
                  inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                  recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                  recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                  recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                  recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                  cellToInputWeightsDimensions, cellToInputWeightsValue,
                  cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                  cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                  inputGateBiasDimensions, inputGateBiasValue,
                  forgetGateBiasDimensions, forgetGateBiasValue,
                  cellBiasDimensions, cellBiasValue,
                  outputGateBiasDimensions, outputGateBiasValue,
                  projectionWeightsDimensions, projectionWeightsValue,
                  projectionBiasDimensions, projectionBiasValue,
                  outputStateInDimensions, outputStateInValue,
                  cellStateInDimensions, cellStateInValue,
                  inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
                  forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
                  cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
                  outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
                  cellClipValue,
                  projectionClipValue,
                  inputIntermediateScale,
                  forgetIntermediateScale,
                  cellIntermediateScale,
                  outputIntermediateScale,
                  hiddenStateZeroPoint,
                  hiddenStateScale,
                  outputStateOutDimensions, outputStateOutValue,
                  cellStateOutDimensions, cellStateOutValue,
                  outputDimensions, outputValue,
                  compute);
}

} // anonymous namespace

// Support for QLSTM with projection is not added yet.
//BOOST_DATA_TEST_CASE(QLSTMWithProjectionTest, COMPUTE_DEVICES)
//{
//    QLstmWithProjection(sample);
//}

BOOST_DATA_TEST_CASE(QLSTMWithNoProjectionTest, COMPUTE_DEVICES)
{
    QLstmWithNoProjection(sample);
}

BOOST_DATA_TEST_CASE(DynamicOutputQLSTMWithNoProjectionTest, COMPUTE_DEVICES)
{
    DynamicOutputQLstmWithNoProjection(sample);
}

BOOST_AUTO_TEST_SUITE_END()