//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
6#include "../DriverTestHelpers.hpp"
7#include "../TestTensor.hpp"
8
9#include "../1.3/HalPolicy.hpp"
10
11#include <armnn/utility/IgnoreUnused.hpp>
12
Sadik Armagan6a903a72020-05-26 10:41:54 +010013#include <boost/test/unit_test.hpp>
14#include <boost/test/data/test_case.hpp>
15#include <boost/math/special_functions/relative_difference.hpp>
16
Colm Doneland7fdbe22020-10-30 16:57:43 +000017#include <array>
18
Sadik Armagan6a903a72020-05-26 10:41:54 +010019BOOST_AUTO_TEST_SUITE(QLSTMTests)
20
21using ArmnnDriver = armnn_driver::ArmnnDriver;
22using DriverOptions = armnn_driver::DriverOptions;
23
24using namespace driverTestHelpers;
25using namespace android::hardware;
26
27using HalPolicy = hal_1_3::HalPolicy;
28
29namespace
30{
31
32template<typename T>
33RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
34{
Sadik Armagan188675f2021-02-12 17:16:42 +000035 V1_0::DataLocation inputInloc = {};
Sadik Armagan6a903a72020-05-26 10:41:54 +010036 inputInloc.poolIndex = poolIndex;
37 inputInloc.offset = 0;
38 inputInloc.length = value.size() * sizeof(T);
39 RequestArgument inputRequestArgument = {};
40 inputRequestArgument.location = inputInloc;
41 inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
42 return inputRequestArgument;
43}
44
// Returns true if the relative difference between two float values is less than the tolerance value given.
// This is used because the floating point comparison tolerance (set on each BOOST_AUTO_TEST_CASE) does not work!
//
// The relative difference is |a - b| / min(|a|, |b|), which matches
// boost::math::relative_difference for the finite, non-zero values used in
// these tests; computing it directly removes the Boost.Math dependency.
// When exactly one operand is zero, the magnitude of the other operand is
// used instead, so no division by zero can occur.
bool TolerantCompareEqual(float a, float b, float tolerance = 1.0f)
{
    float rd;
    if (a == 0.0f)
    {
        rd = std::fabs(b);
    }
    else if (b == 0.0f)
    {
        rd = std::fabs(a);
    }
    else
    {
        // Relative difference of two non-zero values.
        rd = std::fabs(a - b) / std::min(std::fabs(a), std::fabs(b));
    }
    return rd < tolerance;
}
64
65// Helper function to create an OperandLifeTime::NO_VALUE for testing.
66// To be used on optional input operands that have no values - these are valid and should be tested.
67HalPolicy::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
68{
69 // Only create a NO_VALUE for optional operands that have no elements
70 if (dimensions.size() == 0 || dimensions[0] == 0)
71 {
72 return HalPolicy::OperandLifeTime::NO_VALUE;
73 }
74 return HalPolicy::OperandLifeTime::CONSTANT_COPY;
75}
76
77void ExecuteModel(const armnn_driver::hal_1_3::HalPolicy::Model& model,
78 armnn_driver::ArmnnDriver& driver,
79 const V1_0::Request& request)
80{
81 android::sp<V1_3::IPreparedModel> preparedModel = PrepareModel_1_3(model, driver);
82 if (preparedModel.get() != nullptr)
83 {
84 Execute(preparedModel, request);
85 }
86}
87
// Compute backends each test is run against: always the reference backend,
// plus the accelerated CPU backend when built with ARM Compute support.
// NOTE(review): the guard checks ARMCOMPUTECL_ENABLED but the extra backend
// added is CpuAcc (Neon) — confirm this is the intended macro.
#ifndef ARMCOMPUTECL_ENABLED
static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
#else
static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::CpuAcc }};
#endif
93
// Add our own tests here since we skip the qlstm tests which Google supplies (because of non-const weights)
//
// Builds a single-operation QUANTIZED_LSTM model and runs it end-to-end:
//  - operands 0..19  : input tensor, weights, biases and previous-time-step states
//  - operands 20..23 : layer-normalization weights (only added when at least one
//                      of the four value vectors is non-empty)
//  - operands 24..31 : constant scalar parameters (clips, intermediate matmul
//                      scales, hidden-state zero point and scale)
//  - operands 32..34 : the three outputs (output state, cell state, output)
// Weights and biases are supplied as CONSTANT_COPY, or NO_VALUE when their value
// vector is empty. After execution, the output-state and output tensors are
// compared element-wise against the expected values.
//
// NOTE(review): the operation's input index list below always references
// operands 20..23, so at least one layer-normalization tensor must be non-empty
// or the scalar operands after them would shift — confirm all callers satisfy this.
void QLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                   const std::vector<int8_t>& inputValue,
                   const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                   const std::vector<int8_t>& inputToInputWeightsValue,
                   const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                   const std::vector<int8_t>& inputToForgetWeightsValue,
                   const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                   const std::vector<int8_t>& inputToCellWeightsValue,
                   const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                   const std::vector<int8_t>& inputToOutputWeightsValue,
                   const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                   const std::vector<int8_t>& recurrentToInputWeightsValue,
                   const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                   const std::vector<int8_t>& recurrentToForgetWeightsValue,
                   const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                   const std::vector<int8_t>& recurrentToCellWeightsValue,
                   const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                   const std::vector<int8_t>& recurrentToOutputWeightsValue,
                   const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
                   const std::vector<int16_t>& cellToInputWeightsValue,
                   const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
                   const std::vector<int16_t>& cellToForgetWeightsValue,
                   const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
                   const std::vector<int16_t>& cellToOutputWeightsValue,
                   const hidl_vec<uint32_t>& inputGateBiasDimensions,
                   const std::vector<int32_t>& inputGateBiasValue,
                   const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                   const std::vector<int32_t>& forgetGateBiasValue,
                   const hidl_vec<uint32_t>& cellBiasDimensions,
                   const std::vector<int32_t>& cellBiasValue,
                   const hidl_vec<uint32_t>& outputGateBiasDimensions,
                   const std::vector<int32_t>& outputGateBiasValue,
                   const hidl_vec<uint32_t>& projectionWeightsDimensions,
                   const std::vector<int8_t>& projectionWeightsValue,
                   const hidl_vec<uint32_t>& projectionBiasDimensions,
                   const std::vector<int32_t>& projectionBiasValue,
                   const hidl_vec<uint32_t>& outputPreviousTimeStepInDimensions,
                   const std::vector<int8_t>& outputPreviousTimeStepInValue,
                   const hidl_vec<uint32_t>& cellStatePreviousTimeStepInDimensions,
                   const std::vector<int16_t>& cellStatePreviousTimeStepInValue,
                   const hidl_vec<uint32_t>& inputLayerNormWeightsDimensions,
                   const std::vector<int16_t>& inputLayerNormWeightsValue,
                   const hidl_vec<uint32_t>& forgetLayerNormWeightsDimensions,
                   const std::vector<int16_t>& forgetLayerNormWeightsValue,
                   const hidl_vec<uint32_t>& cellLayerNormWeightsDimensions,
                   const std::vector<int16_t>& cellLayerNormWeightsValue,
                   const hidl_vec<uint32_t>& outputLayerNormWeightsDimensions,
                   const std::vector<int16_t>& outputLayerNormWeightsValue,
                   const float& cellClipValue,
                   const float& projectionClipValue,
                   const float& matMulInputGateValue,
                   const float& matMulForgetGateValue,
                   const float& matMulCellGateValue,
                   const float& matMulOutputGateValue,
                   const int32_t& projInputZeroPointValue,
                   const float& projInputScaleValue,
                   const hidl_vec<uint32_t>& outputStateOutDimensions,
                   const std::vector<int8_t>& outputStateOutValue,
                   const hidl_vec<uint32_t>& cellStateOutDimensions,
                   const std::vector<int16_t>& cellStateOutValue,
                   const hidl_vec<uint32_t>& outputDimensions,
                   const std::vector<int8_t>& outputValue,
                   armnn::Compute compute)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
    HalPolicy::Model model = {};

    // Scale/Offset quantization info
    float inputScale = 0.0078125f;
    int32_t inputOffset = 0;

    int32_t hiddenStateZeroPoint = 0;
    float hiddenStateScale = 0.007f;

    // NOTE(review): outputScale/outputOffset are defined but never used below —
    // the output operands are quantized with cellStateScale instead; confirm
    // that is intentional.
    float outputScale = hiddenStateScale;
    int32_t outputOffset = hiddenStateZeroPoint;

    float cellStateScale = 3.05176e-05f;
    float cellWeightsScale = 1.0f;
    int32_t cellStateOffset = 0;

    float weightsScale = 0.00784314f;
    int32_t weightsOffset = 0;

    float layerNormScale = 3.05182e-05f;
    int32_t layerNormOffset = 0;

    float biasScale = layerNormScale / 1024;
    int32_t biasOffset = 0;

    // Inputs:
    // 00: The input to the LSTM cell. Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED Shape: [batchSize, inputSize]
    AddInputOperand<HalPolicy>(model,
                               inputDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                               inputScale,
                               inputOffset);

    // 01: The input-to-input weights. Optional. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize]
    AddTensorOperand<HalPolicy>(model,
                                inputToInputWeightsDimensions,
                                inputToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(inputToInputWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 02: The input-to-forget weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize]
    AddTensorOperand<HalPolicy>(model,
                                inputToForgetWeightsDimensions,
                                inputToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(inputToForgetWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 03: The input-to-cell weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize]
    AddTensorOperand<HalPolicy>(model,
                                inputToCellWeightsDimensions,
                                inputToCellWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(inputToCellWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 04: The input-to-output weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize]
    AddTensorOperand<HalPolicy>(model,
                                inputToOutputWeightsDimensions,
                                inputToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(inputToOutputWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 05: The recurrent-to-input weights. Optional. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM
    //     Shape: [numUnits, outputSize]
    AddTensorOperand<HalPolicy>(model,
                                recurrentToInputWeightsDimensions,
                                recurrentToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(recurrentToInputWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 06: The recurrent-to-forget weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, outputSize]
    AddTensorOperand<HalPolicy>(model,
                                recurrentToForgetWeightsDimensions,
                                recurrentToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(recurrentToForgetWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 07: The recurrent-to-cell weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, outputSize]
    AddTensorOperand<HalPolicy>(model,
                                recurrentToCellWeightsDimensions,
                                recurrentToCellWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(recurrentToCellWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 08: The recurrent-to-output weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, outputSize]
    AddTensorOperand<HalPolicy>(model,
                                recurrentToOutputWeightsDimensions,
                                recurrentToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(recurrentToOutputWeightsDimensions),
                                weightsScale,
                                weightsOffset);

    // 09: The cell-to-input weights (for peephole). Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM
    //     Shape: [numUnits]
    AddTensorOperand<HalPolicy>(model,
                                cellToInputWeightsDimensions,
                                cellToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT16_SYMM ,
                                CreateNoValueLifeTime(cellToInputWeightsDimensions),
                                cellWeightsScale,
                                weightsOffset);

    // 10: The cell-to-forget weights (for peephole). Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM
    //     Shape: [numUnits].
    AddTensorOperand<HalPolicy>(model,
                                cellToForgetWeightsDimensions,
                                cellToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                CreateNoValueLifeTime(cellToForgetWeightsDimensions),
                                cellWeightsScale,
                                weightsOffset);

    // 11: The cell-to-output weights (for peephole). Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM
    //     Shape: [numUnits]
    AddTensorOperand<HalPolicy>(model,
                                cellToOutputWeightsDimensions,
                                cellToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                CreateNoValueLifeTime(cellToOutputWeightsDimensions),
                                cellWeightsScale,
                                weightsOffset);

    // 12: The input gate bias. Quantized with scale being the product of input and weights scales
    //     and zeroPoint equal to 0. Optional. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits]
    AddTensorOperand<HalPolicy>(model,
                                inputGateBiasDimensions,
                                inputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(inputGateBiasDimensions),
                                biasScale,
                                biasOffset);

    // 13: The forget gate bias. Quantized with scale being the product of input and weights scales
    //     and zeroPoint equal to 0. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits]
    AddTensorOperand<HalPolicy>(model,
                                forgetGateBiasDimensions,
                                forgetGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(forgetGateBiasDimensions),
                                biasScale,
                                biasOffset);

    // 14: The cell bias. Quantized with scale being the product of input and weights scales and zeroPoint equal to 0.
    //     Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits]
    AddTensorOperand<HalPolicy>(model,
                                cellBiasDimensions,
                                cellBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(cellBiasDimensions),
                                biasScale,
                                biasOffset);

    // 15: The output gate bias. Quantized with scale being the product of input and weights scales
    //     and zeroPoint equal to 0. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits]
    AddTensorOperand<HalPolicy>(model,
                                outputGateBiasDimensions,
                                outputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(outputGateBiasDimensions),
                                biasScale,
                                biasOffset);

    // 16: The projection weights. Optional. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [outputSize, numUnits]
    AddTensorOperand<HalPolicy>(model,
                                projectionWeightsDimensions,
                                projectionWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_SYMM,
                                CreateNoValueLifeTime(projectionWeightsDimensions),
                                0.00392157f,
                                weightsOffset);

    // 17: The projection bias. Quantized with scale being the product of input and weights scales
    //     and zeroPoint equal to 0. Optional. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [outputSize]
    AddTensorOperand<HalPolicy>(model,
                                projectionBiasDimensions,
                                projectionBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(projectionBiasDimensions),
                                0.0f,
                                biasOffset);

    // 18: The output from the previous time step. Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
    //     Shape: [batchSize, outputSize]
    // NOTE(review): quantized with cellStateScale rather than the unused
    // outputScale above — confirm this matches the reference test data.
    AddInputOperand<HalPolicy>(model,
                               outputPreviousTimeStepInDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                               cellStateScale,
                               inputOffset);

    // 19: The cell state from the previous time step. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM
    //     Shape: [batchSize, numUnits]
    AddInputOperand<HalPolicy>(model,
                               cellStatePreviousTimeStepInDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                               cellStateScale,
                               cellStateOffset);

    // If any of the tensors have a value all normalization tensors are set
    if (!inputLayerNormWeightsValue.empty() ||
        !forgetLayerNormWeightsValue.empty() ||
        !cellLayerNormWeightsValue.empty() ||
        !outputLayerNormWeightsValue.empty())
    {
        // Normalization:
        // 20: The input layer normalization weights. Used to rescale normalized inputs to activation at input gate.
        //     Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits]
        AddTensorOperand<HalPolicy>(model,
                                    inputLayerNormWeightsDimensions,
                                    inputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                    CreateNoValueLifeTime(inputLayerNormWeightsDimensions),
                                    layerNormScale,
                                    layerNormOffset);

        // 21: The forget layer normalization weights. Used to rescale normalized inputs to activation at forget gate.
        //     Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits]
        AddTensorOperand<HalPolicy>(model,
                                    forgetLayerNormWeightsDimensions,
                                    forgetLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                    CreateNoValueLifeTime(forgetLayerNormWeightsDimensions),
                                    layerNormScale,
                                    layerNormOffset);

        // 22: The cell layer normalization weights. Used to rescale normalized inputs to activation at cell gate.
        //     Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits]
        AddTensorOperand<HalPolicy>(model,
                                    cellLayerNormWeightsDimensions,
                                    cellLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                    CreateNoValueLifeTime(cellLayerNormWeightsDimensions),
                                    layerNormScale,
                                    layerNormOffset);

        // 23: The output layer normalization weights. Used to rescale normalized inputs to activation at output gate.
        //     Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits]
        AddTensorOperand<HalPolicy>(model,
                                    outputLayerNormWeightsDimensions,
                                    outputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                    CreateNoValueLifeTime(outputLayerNormWeightsDimensions),
                                    layerNormScale,
                                    layerNormOffset);
    }

    // Constant scalar values
    // 24: The cell clip. If provided the cell state is clipped by this value prior to the cell output activation.
    //     Optional. Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, cellClipValue);

    // Constant scalar values
    // 25: The projection clip. If provided and projection is enabled, this is used for clipping the projected values.
    //     Optional. Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, projectionClipValue);

    // Constant scalar values
    // 26: The scale of the intermediate result of matmul, i.e. input to layer normalization, at input gate.
    //     Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, matMulInputGateValue);

    // Constant scalar values
    // 27: The scale of the intermediate result of matmul, i.e. input to layer normalization, at forget gate.
    //     Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, matMulForgetGateValue);

    // Constant scalar values
    // 28: The scale of the intermediate result of matmul, i.e. input to layer normalization, at cell gate.
    //     Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, matMulCellGateValue);

    // Constant scalar values
    // 29: The scale of the intermediate result of matmul, i.e. input to layer normalization, at output gate.
    //     Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, matMulOutputGateValue);

    // Constant scalar values
    // 30: The zero point of the hidden state, i.e. input to projection. Type: ANEURALNETWORKS_INT32.
    AddIntOperand<HalPolicy>(model, projInputZeroPointValue);

    // Constant scalar values
    // 31: The scale of the hidden state, i.e. input to projection. Type: ANEURALNETWORKS_FLOAT32.
    AddFloatOperand<HalPolicy>(model, projInputScaleValue);

    // Outputs:
    //  0: The output state (out). Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED Shape: [batchSize, outputSize]
    // NOTE(review): cellStateScale is passed as BOTH scale and zero point here;
    // the float is narrowed into the offset parameter — confirm intentional.
    AddOutputOperand<HalPolicy>(model,
                                outputStateOutDimensions,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                                cellStateScale,
                                cellStateScale);

    //  1: The cell state (out). Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [batchSize, numUnits].
    AddOutputOperand<HalPolicy>(model,
                                cellStateOutDimensions,
                                HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                cellStateScale,
                                cellStateOffset);

    //  2: The output. This is effectively the same as the current "output state (out)" value.
    //     Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED Shape: [batchSize, outputSize]
    // NOTE(review): same scale/zero-point pattern as output 0 — see note above.
    AddOutputOperand<HalPolicy>(model,
                                outputDimensions,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                                cellStateScale,
                                cellStateScale);

    // make the QUANTIZED_LSTM operation
    model.main.operations.resize(1);
    model.main.operations[0].type = HalPolicy::OperationType::QUANTIZED_LSTM;

    // Fixed operand index lists: 0..31 are the inputs added above, 32..34 the outputs.
    model.main.operations[0].inputs = hidl_vec<uint32_t> { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11,
                                                           12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
                                                           24, 25, 26, 27, 28, 29, 30, 31};
    model.main.operations[0].outputs = hidl_vec<uint32_t> {32, 33, 34};

    // define the input values
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    // Runtime inputs (pools 0..2): current input and the two previous-step states.
    inputArguments[0] = CreateRequestArgument<int8_t>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<int8_t>(outputPreviousTimeStepInValue, 1);
    inputArguments[2] = CreateRequestArgument<int16_t>(cellStatePreviousTimeStepInValue, 2);

    // define the expected output values
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(3);

    // Outputs live in pools 3..5, matching the AddPoolAndGetData calls below.
    outputArguments[0] = CreateRequestArgument<int8_t>(outputStateOutValue, 3);
    outputArguments[1] = CreateRequestArgument<int16_t>(cellStateOutValue, 4);
    outputArguments[2] = CreateRequestArgument<int8_t>(outputValue, 5);

    android::hardware::neuralnetworks::V1_0::Request request = {};
    request.inputs  = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(outputPreviousTimeStepInValue.size(), request, outputPreviousTimeStepInValue.data());
    AddPoolAndSetData(cellStatePreviousTimeStepInValue.size(), request, cellStatePreviousTimeStepInValue.data());

    // add memory for the outputs
    android::sp<IMemory> outputStateOutMemory = AddPoolAndGetData<int8_t>(outputStateOutValue.size(), request);
    int8_t* outputStateOutData = static_cast<int8_t*>(static_cast<void*>(outputStateOutMemory->getPointer()));

    android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData<int16_t>(cellStateOutValue.size(), request);
    int16_t* cellStateOutData = static_cast<int16_t*>(static_cast<void*>(cellStateOutMemory->getPointer()));

    android::sp<IMemory> outputMemory = AddPoolAndGetData<int8_t>(outputValue.size(), request);
    int8_t* outputData = static_cast<int8_t*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    ExecuteModel(model, *driver, request);

    // check the results
    for (size_t i = 0; i < outputStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
                   "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
    }

    // CELL STATE OUTPUT Does not match currently: IVGCVSW-4860 Verify remaining VTS tests (2) for QLSTM
    // Comment out for now
    // for (size_t i = 0; i < cellStateOutValue.size(); ++i)
    // {
    //    BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]),
    //               "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
    //}

    for (size_t i = 0; i < outputValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
    }
}
548
// Exercises QUANTIZED_LSTM with layer normalization AND output projection enabled
// (non-empty projection weights/bias), using the reference data below.
void QLstmWithProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/specs/V1_3/qlstm_projection.mod.py
    // with values from android/frameworks/ml/nn/runtime/test/generated/spec_V1_3/qlstm_projection.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of SUBGRAPH_INPUT tensors).

    uint32_t batchSize  = 2;
    uint32_t inputSize  = 5;
    uint32_t outputSize = 3;
    uint32_t numUnits   = 4;

    // Inputs:
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<int8_t> inputValue{ 90, 102, 13, 26, 38, 102, 13, 26, 51, 64};

    hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToInputWeightsValue{ 64,   77,   89, -102,
                                                 -115,  13,   25,   38,
                                                  -51,  64, -102,   89,
                                                  -77,  64,  -51,  -64,
                                                  -51, -38,  -25,  -13 };

    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToForgetWeightsValue{ -77,  -13,  38,  25,
                                                   115,  -64, -25, -51,
                                                    38, -102, -51,  38,
                                                   -64,  -51, -77,  38,
                                                   -51,  -77, -64, -64 };

    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToCellWeightsValue{  -51, -38, -25, -13,
                                                  -64,  64, -25, -38,
                                                  -25, -77,  77, -13,
                                                  -51, -38, -89,  89,
                                                 -115, -64, 102,  77 };

    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToOutputWeightsValue{ -102, -51, -25, -115,
                                                    -13, -89,  38,  -38,
                                                   -102, -25,  77,  -25,
                                                     51, -89, -38,  -64,
                                                     13,  64, -77,  -51 };

    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToInputWeightsValue{ -25, -38, 51, 13, -64, 115, -25, -38, -89, 6, -25, -77 };

    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToForgetWeightsValue{ -64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25 };

    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToCellWeightsValue{ -38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25 };

    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToOutputWeightsValue{ 38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25 };

    // Peephole (cell-to-gate) weights are omitted: empty dimensions/values
    // become NO_VALUE operands via CreateNoValueLifeTime.
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<int16_t> cellToInputWeightsValue;

    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<int16_t> cellToForgetWeightsValue;

    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<int16_t> cellToOutputWeightsValue;

    hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
    std::vector<int32_t> inputGateBiasValue{ 644245, 3221226, 4724464, 8160438 };

    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<int32_t> forgetGateBiasValue{ 2147484, -6442451, -4294968, 2147484 };

    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<int32_t> cellBiasValue{-1073742, 15461883, 5368709, 1717987 };

    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<int32_t> outputGateBiasValue{ 1073742, -214748, 4294968, 2147484 };

    // Projection enabled: non-empty projection weights and bias.
    hidl_vec<uint32_t> projectionWeightsDimensions{outputSize, numUnits};
    std::vector<int8_t> projectionWeightsValue{ -25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51 };

    hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
    std::vector<int32_t> projectionBiasValue{ 0, 0, 0 };

    // Previous-time-step states start at zero.
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<int8_t> outputStateInValue{ 0, 0, 0, 0, 0, 0 };

    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<int16_t> cellStateInValue{ 0, 0, 0, 0, 0, 0, 0, 0 };

    // Normalization:
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> inputLayerNormWeightsValue{ 3277, 6553, 9830, 16384 };

    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> forgetLayerNormWeightsValue{ 6553, 6553, 13107, 9830 };

    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> cellLayerNormWeightsValue{ 22937, 6553, 9830, 26214 };

    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> outputLayerNormWeightsValue{ 19660, 6553, 6553, 16384 };

    // Scalar parameters (operands 24..31 of QLstmTestImpl).
    float cellClipValue           = 0.0f;
    float projectionClipValue     = 0.0f;
    float inputIntermediateScale  = 0.007059f;
    float forgetIntermediateScale = 0.007812f;
    float cellIntermediateScale   = 0.007059f;
    float outputIntermediateScale = 0.007812f;
    int32_t hiddenStateZeroPoint  = 0;
    float hiddenStateScale        = 0.007f;

    // Outputs: expected values taken from the generated example data above.
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<int8_t> outputStateOutValue{ 127, 127, -108, -67, 127, 127 };

    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<int16_t> cellStateOutValue { -14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939 };

    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<int8_t> outputValue { 127, 127, -108, -67, 127, 127 };

    QLstmTestImpl(inputDimensions,                    inputValue,
                  inputToInputWeightsDimensions,      inputToInputWeightsValue,
                  inputToForgetWeightsDimensions,     inputToForgetWeightsValue,
                  inputToCellWeightsDimensions,       inputToCellWeightsValue,
                  inputToOutputWeightsDimensions,     inputToOutputWeightsValue,
                  recurrentToInputWeightsDimensions,  recurrentToInputWeightsValue,
                  recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                  recurrentToCellWeightsDimensions,   recurrentToCellWeightsValue,
                  recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                  cellToInputWeightsDimensions,       cellToInputWeightsValue,
                  cellToForgetWeightsDimensions,      cellToForgetWeightsValue,
                  cellToOutputWeightsDimensions,      cellToOutputWeightsValue,
                  inputGateBiasDimensions,            inputGateBiasValue,
                  forgetGateBiasDimensions,           forgetGateBiasValue,
                  cellBiasDimensions,                 cellBiasValue,
                  outputGateBiasDimensions,           outputGateBiasValue,
                  projectionWeightsDimensions,        projectionWeightsValue,
                  projectionBiasDimensions,           projectionBiasValue,
                  outputStateInDimensions,            outputStateInValue,
                  cellStateInDimensions,              cellStateInValue,
                  inputLayerNormWeightsDimensions,    inputLayerNormWeightsValue,
                  forgetLayerNormWeightsDimensions,   forgetLayerNormWeightsValue,
                  cellLayerNormWeightsDimensions,     cellLayerNormWeightsValue,
                  outputLayerNormWeightsDimensions,   outputLayerNormWeightsValue,
                  cellClipValue,
                  projectionClipValue,
                  inputIntermediateScale,
                  forgetIntermediateScale,
                  cellIntermediateScale,
                  outputIntermediateScale,
                  hiddenStateZeroPoint,
                  hiddenStateScale,
                  outputStateOutDimensions,           outputStateOutValue,
                  cellStateOutDimensions,             cellStateOutValue,
                  outputDimensions,                   outputValue,
                  compute);
}
706
// Builds and executes a QLSTM (8-bit quantized LSTM) network with no projection
// layer on the given compute backend, comparing the output state, cell state and
// output tensors against pre-computed reference values via QLstmTestImpl.
// Zero-sized dimensions (e.g. {0, 0}) mark an optional tensor as omitted.
// @param compute The ArmNN compute backend to execute the network on.
void QLstmWithNoProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py
    // with values from android/frameworks/ml/nn/runtime/test/generated/spec_V1_3/qlstm_noprojection.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of SUBGRAPH_INPUT tensors).

    uint32_t batchSize = 2;
    uint32_t inputSize = 5;
    uint32_t outputSize = 4;
    uint32_t numUnits = 4;

    // Inputs:
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<int8_t> inputValue { 90, 102, 13, 26, 38, 102, 13, 26, 51, 64 };

    // Input-to-input weights are omitted (zero-sized tensor).
    hidl_vec<uint32_t> inputToInputWeightsDimensions{0, 0};
    std::vector<int8_t> inputToInputWeightsValue;

    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToForgetWeightsValue { -77, -13, 38, 25, 115,
                                                    -64, -25, -51, 38, -102,
                                                    -51, 38, -64, -51, -77,
                                                    38, -51, -77, -64, -64 };

    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToCellWeightsValue { -51, -38, -25, -13, -64,
                                                  64, -25, -38, -25, -77,
                                                  77, -13, -51, -38, -89,
                                                  89, -115, -64, 102, 77 };

    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToOutputWeightsValue { -102, -51, -25, -115, -13,
                                                    -89, 38, -38, -102, -25,
                                                    77, -25, 51, -89, -38,
                                                    -64, 13, 64, -77, -51 };

    // Recurrent-to-input weights are omitted (zero-sized tensor).
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0, 0};
    std::vector<int8_t> recurrentToInputWeightsValue;

    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToForgetWeightsValue { -64, -38, -64, -25,
                                                        77, 51, 115, 38,
                                                        -13, 25, 64, 25,
                                                        25, 38, -13, 51 };

    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToCellWeightsValue { -38, 25, 13, -38,
                                                      102, -10, -25, 38,
                                                      102, -77, -13, 25,
                                                      38, -13, 25, 64 };

    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToOutputWeightsValue { 38, -13, 13, -25,
                                                        -64, -89, -25, -77,
                                                        -13, -51, -89, -25,
                                                        13, 64, 25, -38 };

    // All cell-to-gate (peephole) weights are omitted (zero-sized tensors).
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<int16_t> cellToInputWeightsValue;

    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<int16_t> cellToForgetWeightsValue;

    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<int16_t> cellToOutputWeightsValue;

    // Input gate bias is omitted (zero-sized tensor).
    hidl_vec<uint32_t> inputGateBiasDimensions{0};
    std::vector<int32_t> inputGateBiasValue;

    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<int32_t> forgetGateBiasValue { 2147484, -6442451, -4294968, 2147484 };

    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<int32_t> cellBiasValue { -1073742, 15461883, 5368709, 1717987 };

    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<int32_t> outputGateBiasValue { 1073742, -214748, 4294968, 2147484 };

    // No projection layer: projection weights and bias are omitted (zero-sized tensors).
    hidl_vec<uint32_t> projectionWeightsDimensions{0, 0};
    std::vector<int8_t> projectionWeightsValue;

    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<int32_t> projectionBiasValue;

    // Initial output state and cell state are all zeros.
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<int8_t> outputStateInValue { 0, 0, 0, 0, 0, 0, 0, 0 };

    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<int16_t> cellStateInValue { 0, 0, 0, 0, 0, 0, 0, 0 };

    // Normalization:
    // Input layer-norm weights are omitted (zero-sized tensor).
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
    std::vector<int16_t> inputLayerNormWeightsValue;

    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> forgetLayerNormWeightsValue { 6553, 6553, 13107, 9830 };

    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> cellLayerNormWeightsValue { 22937, 6553, 9830, 26214 };

    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> outputLayerNormWeightsValue { 19660, 6553, 6553, 16384 };

    // Scalar parameters: clipping disabled (0.0f), plus quantization scales and
    // zero point for the intermediate results and the hidden state tensor.
    float cellClipValue = 0.0f;
    float projectionClipValue = 0.0f;
    float inputIntermediateScale = 0.007059f;
    float forgetIntermediateScale = 0.007812f;
    float cellIntermediateScale = 0.007059f;
    float outputIntermediateScale = 0.007812f;
    int32_t hiddenStateZeroPoint = 0;
    float hiddenStateScale = 0.007f;

    // Outputs (expected reference values from the NNAPI generated example):
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<int8_t> outputStateOutValue { -15, 21, 14, 20, -15, 15, 5, 27 };

    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<int16_t> cellStateOutValue { -11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149 };

    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<int8_t> outputValue { -15, 21, 14, 20, -15, 15, 5, 27 };

    // Build, run and validate the network; argument order must match QLstmTestImpl.
    QLstmTestImpl(inputDimensions, inputValue,
                  inputToInputWeightsDimensions, inputToInputWeightsValue,
                  inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                  inputToCellWeightsDimensions, inputToCellWeightsValue,
                  inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                  recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                  recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                  recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                  recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                  cellToInputWeightsDimensions, cellToInputWeightsValue,
                  cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                  cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                  inputGateBiasDimensions, inputGateBiasValue,
                  forgetGateBiasDimensions, forgetGateBiasValue,
                  cellBiasDimensions, cellBiasValue,
                  outputGateBiasDimensions, outputGateBiasValue,
                  projectionWeightsDimensions, projectionWeightsValue,
                  projectionBiasDimensions, projectionBiasValue,
                  outputStateInDimensions, outputStateInValue,
                  cellStateInDimensions, cellStateInValue,
                  inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
                  forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
                  cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
                  outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
                  cellClipValue,
                  projectionClipValue,
                  inputIntermediateScale,
                  forgetIntermediateScale,
                  cellIntermediateScale,
                  outputIntermediateScale,
                  hiddenStateZeroPoint,
                  hiddenStateScale,
                  outputStateOutDimensions, outputStateOutValue,
                  cellStateOutDimensions, cellStateOutValue,
                  outputDimensions, outputValue,
                  compute);
}
866
// Same network and reference data as QLstmWithNoProjection, but with the
// cellStateOut tensor's dimensions left empty so the driver must infer the
// output shape dynamically at execution time.
// Zero-sized dimensions (e.g. {0, 0}) mark an optional tensor as omitted.
// @param compute The ArmNN compute backend to execute the network on.
void DynamicOutputQLstmWithNoProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py
    // with values from android/frameworks/ml/nn/runtime/test/generated/spec_V1_3/qlstm_noprojection.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of SUBGRAPH_INPUT tensors)
    // and made cellStateOutput dynamic.

    uint32_t batchSize = 2;
    uint32_t inputSize = 5;
    uint32_t outputSize = 4;
    uint32_t numUnits = 4;

    // Inputs:
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<int8_t> inputValue { 90, 102, 13, 26, 38, 102, 13, 26, 51, 64 };

    // Input-to-input weights are omitted (zero-sized tensor).
    hidl_vec<uint32_t> inputToInputWeightsDimensions{0, 0};
    std::vector<int8_t> inputToInputWeightsValue;

    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToForgetWeightsValue { -77, -13, 38, 25, 115,
                                                    -64, -25, -51, 38, -102,
                                                    -51, 38, -64, -51, -77,
                                                    38, -51, -77, -64, -64 };

    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToCellWeightsValue { -51, -38, -25, -13, -64,
                                                  64, -25, -38, -25, -77,
                                                  77, -13, -51, -38, -89,
                                                  89, -115, -64, 102, 77 };

    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<int8_t> inputToOutputWeightsValue { -102, -51, -25, -115, -13,
                                                    -89, 38, -38, -102, -25,
                                                    77, -25, 51, -89, -38,
                                                    -64, 13, 64, -77, -51 };

    // Recurrent-to-input weights are omitted (zero-sized tensor).
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0, 0};
    std::vector<int8_t> recurrentToInputWeightsValue;

    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToForgetWeightsValue { -64, -38, -64, -25,
                                                        77, 51, 115, 38,
                                                        -13, 25, 64, 25,
                                                        25, 38, -13, 51 };

    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToCellWeightsValue { -38, 25, 13, -38,
                                                      102, -10, -25, 38,
                                                      102, -77, -13, 25,
                                                      38, -13, 25, 64 };

    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<int8_t> recurrentToOutputWeightsValue { 38, -13, 13, -25,
                                                        -64, -89, -25, -77,
                                                        -13, -51, -89, -25,
                                                        13, 64, 25, -38 };

    // All cell-to-gate (peephole) weights are omitted (zero-sized tensors).
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<int16_t> cellToInputWeightsValue;

    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<int16_t> cellToForgetWeightsValue;

    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<int16_t> cellToOutputWeightsValue;

    // Input gate bias is omitted (zero-sized tensor).
    hidl_vec<uint32_t> inputGateBiasDimensions{0};
    std::vector<int32_t> inputGateBiasValue;

    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<int32_t> forgetGateBiasValue { 2147484, -6442451, -4294968, 2147484 };

    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<int32_t> cellBiasValue { -1073742, 15461883, 5368709, 1717987 };

    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<int32_t> outputGateBiasValue { 1073742, -214748, 4294968, 2147484 };

    // No projection layer: projection weights and bias are omitted (zero-sized tensors).
    hidl_vec<uint32_t> projectionWeightsDimensions{0, 0};
    std::vector<int8_t> projectionWeightsValue;

    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<int32_t> projectionBiasValue;

    // Initial output state and cell state are all zeros.
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<int8_t> outputStateInValue { 0, 0, 0, 0, 0, 0, 0, 0 };

    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<int16_t> cellStateInValue { 0, 0, 0, 0, 0, 0, 0, 0 };

    // Normalization:
    // Input layer-norm weights are omitted (zero-sized tensor).
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
    std::vector<int16_t> inputLayerNormWeightsValue;

    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> forgetLayerNormWeightsValue { 6553, 6553, 13107, 9830 };

    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> cellLayerNormWeightsValue { 22937, 6553, 9830, 26214 };

    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
    std::vector<int16_t> outputLayerNormWeightsValue { 19660, 6553, 6553, 16384 };

    // Scalar parameters: clipping disabled (0.0f), plus quantization scales and
    // zero point for the intermediate results and the hidden state tensor.
    float cellClipValue = 0.0f;
    float projectionClipValue = 0.0f;
    float inputIntermediateScale = 0.007059f;
    float forgetIntermediateScale = 0.007812f;
    float cellIntermediateScale = 0.007059f;
    float outputIntermediateScale = 0.007812f;
    int32_t hiddenStateZeroPoint = 0;
    float hiddenStateScale = 0.007f;

    // Outputs (expected reference values from the NNAPI generated example):
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<int8_t> outputStateOutValue { -15, 21, 14, 20, -15, 15, 5, 27 };

    // Empty dimensions: cellStateOut is a dynamic output whose shape must be
    // inferred by the driver at execution time.
    hidl_vec<uint32_t> cellStateOutDimensions{};
    std::vector<int16_t> cellStateOutValue { -11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149 };

    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<int8_t> outputValue { -15, 21, 14, 20, -15, 15, 5, 27 };

    // Build, run and validate the network; argument order must match QLstmTestImpl.
    QLstmTestImpl(inputDimensions, inputValue,
                  inputToInputWeightsDimensions, inputToInputWeightsValue,
                  inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                  inputToCellWeightsDimensions, inputToCellWeightsValue,
                  inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                  recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                  recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                  recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                  recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                  cellToInputWeightsDimensions, cellToInputWeightsValue,
                  cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                  cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                  inputGateBiasDimensions, inputGateBiasValue,
                  forgetGateBiasDimensions, forgetGateBiasValue,
                  cellBiasDimensions, cellBiasValue,
                  outputGateBiasDimensions, outputGateBiasValue,
                  projectionWeightsDimensions, projectionWeightsValue,
                  projectionBiasDimensions, projectionBiasValue,
                  outputStateInDimensions, outputStateInValue,
                  cellStateInDimensions, cellStateInValue,
                  inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
                  forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
                  cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
                  outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
                  cellClipValue,
                  projectionClipValue,
                  inputIntermediateScale,
                  forgetIntermediateScale,
                  cellIntermediateScale,
                  outputIntermediateScale,
                  hiddenStateZeroPoint,
                  hiddenStateScale,
                  outputStateOutDimensions, outputStateOutValue,
                  cellStateOutDimensions, cellStateOutValue,
                  outputDimensions, outputValue,
                  compute);
}
1027
Sadik Armagan6a903a72020-05-26 10:41:54 +01001028} // anonymous namespace
1029
// Disabled: QLSTM with a projection layer is not yet supported by the driver.
// Re-enable this test case once projection support is added.
//BOOST_DATA_TEST_CASE(QLSTMWithProjectionTest, COMPUTE_DEVICES)
//{
//    QLstmWithProjection(sample);
//}
Sadik Armagan6a903a72020-05-26 10:41:54 +01001035
// Runs the QLSTM-without-projection test once per compute device in COMPUTE_DEVICES
// (the data test case binds each device to 'sample').
BOOST_DATA_TEST_CASE(QLSTMWithNoProjectionTest, COMPUTE_DEVICES)
{
    QLstmWithNoProjection(sample);
}
1040
// Runs the dynamic-output QLSTM-without-projection test once per compute device
// in COMPUTE_DEVICES (the data test case binds each device to 'sample').
BOOST_DATA_TEST_CASE(DynamicOutputQLSTMWithNoProjectionTest, COMPUTE_DEVICES)
{
    DynamicOutputQLstmWithNoProjection(sample);
}
1045
Sadik Armagan6a903a72020-05-26 10:41:54 +01001046BOOST_AUTO_TEST_SUITE_END()