blob: 579524ca44201e9e7ca132368fa10b19d5d4dace [file] [log] [blame]
telsoa01ce3e84a2018-08-31 09:31:35 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beck93e48982018-09-05 13:05:09 +01003// SPDX-License-Identifier: MIT
telsoa01ce3e84a2018-08-31 09:31:35 +01004//
5#include "DriverTestHelpers.hpp"
Matteo Martincighc7434122018-11-14 12:27:04 +00006#include "OperationsUtils.h"
7
Aron Virginas-Tar44cfd842019-06-14 15:45:03 +01008#include "../1.0/HalPolicy.hpp"
9
Matteo Martincighc7434122018-11-14 12:27:04 +000010#include <boost/array.hpp>
telsoa01ce3e84a2018-08-31 09:31:35 +010011#include <boost/test/unit_test.hpp>
Matteo Martincighc7434122018-11-14 12:27:04 +000012#include <boost/test/data/test_case.hpp>
telsoa01ce3e84a2018-08-31 09:31:35 +010013#include <boost/math/special_functions/relative_difference.hpp>
14#include <log/log.h>
15
telsoa01ce3e84a2018-08-31 09:31:35 +010016#include <cmath>
17
18BOOST_AUTO_TEST_SUITE(LstmTests)
19
Aron Virginas-Tar44cfd842019-06-14 15:45:03 +010020using ArmnnDriver = armnn_driver::ArmnnDriver;
telsoa01ce3e84a2018-08-31 09:31:35 +010021using DriverOptions = armnn_driver::DriverOptions;
Aron Virginas-Tar44cfd842019-06-14 15:45:03 +010022using HalPolicy = armnn_driver::hal_1_0::HalPolicy;
23
telsoa01ce3e84a2018-08-31 09:31:35 +010024using namespace driverTestHelpers;
25using namespace android::hardware;
26
27namespace
28{
29
30template<typename T>
Matteo Martincighc7434122018-11-14 12:27:04 +000031RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
telsoa01ce3e84a2018-08-31 09:31:35 +010032{
33 DataLocation inputInloc = {};
34 inputInloc.poolIndex = poolIndex;
35 inputInloc.offset = 0;
36 inputInloc.length = value.size() * sizeof(T);
37 RequestArgument inputRequestArgument = {};
38 inputRequestArgument.location = inputInloc;
39 inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
40 return inputRequestArgument;
41}
42
// Returns true if the relative difference between two float values is less than the tolerance value given.
// This is used because the floating point comparison tolerance (set on each BOOST_AUTO_TEST_CASE) does not work!
bool TolerantCompareEqual(float a, float b, float tolerance = 0.00001f)
{
    float rd;
    if (a == 0.0f)
    {
        // Relative difference is undefined when one operand is zero;
        // fall back to the absolute magnitude of the other operand.
        rd = std::fabs(b);
    }
    else if (b == 0.0f)
    {
        rd = std::fabs(a);
    }
    else
    {
        // |a - b| scaled by the smaller magnitude of the two operands - the same
        // definition boost::math::relative_difference uses for finite, non-zero
        // values, computed here with the standard library only.
        rd = std::fabs(a - b) / std::min(std::fabs(a), std::fabs(b));
    }
    return rd < tolerance;
}
62
Kevin Mayf29a2c52019-03-14 11:56:32 +000063// Helper function to create an OperandLifeTime::NO_VALUE for testing.
64// To be used on optional input operands that have no values - these are valid and should be tested.
65OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
66{
67 // Only create a NO_VALUE for optional operands that have no elements
68 if (dimensions.size() == 0 || dimensions[0] == 0)
69 {
70 return OperandLifeTime::NO_VALUE;
71 }
72 return OperandLifeTime::CONSTANT_COPY;
73}
Matteo Martincighc7434122018-11-14 12:27:04 +000074} // anonymous namespace
telsoa01ce3e84a2018-08-31 09:31:35 +010075
76// Add our own tests here since we fail the lstm tests which Google supplies (because of non-const weights)
77
// Builds an LSTM model from the given operand dimensions/values, runs it through the
// ArmNN driver on the requested compute backend, and compares the "output state (out)",
// "cell state (out)" and "output" results against the expected values supplied.
//
// Weights, biases and the scalar parameters are added as CONSTANT_COPY operands (or
// NO_VALUE, via CreateNoValueLifeTime, for empty optional operands); only the input,
// output state and cell state are passed in as request inputs. The scratch buffer
// output gets a memory pool but its contents are not compared.
//
// NOTE: the order of the Add*Operand calls below must match the operand indices listed
// in model.operations[0].inputs/outputs at the end of this function - do not reorder.
void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                  const std::vector<float>& inputValue,
                  const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                  const std::vector<float>& inputToInputWeightsValue,
                  const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                  const std::vector<float>& inputToForgetWeightsValue,
                  const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                  const std::vector<float>& inputToCellWeightsValue,
                  const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                  const std::vector<float>& inputToOutputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                  const std::vector<float>& recurrentToInputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                  const std::vector<float>& recurrentToForgetWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                  const std::vector<float>& recurrentToCellWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                  const std::vector<float>& recurrentToOutputWeightsValue,
                  const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
                  const std::vector<float>& cellToInputWeightsValue,
                  const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
                  const std::vector<float>& cellToForgetWeightsValue,
                  const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
                  const std::vector<float>& cellToOutputWeightsValue,
                  const hidl_vec<uint32_t>& inputGateBiasDimensions,
                  const std::vector<float>& inputGateBiasValue,
                  const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                  const std::vector<float>& forgetGateBiasValue,
                  const hidl_vec<uint32_t>& cellBiasDimensions,
                  const std::vector<float>& cellBiasValue,
                  const hidl_vec<uint32_t>& outputGateBiasDimensions,
                  const std::vector<float>& outputGateBiasValue,
                  const hidl_vec<uint32_t>& projectionWeightsDimensions,
                  const std::vector<float>& projectionWeightsValue,
                  const hidl_vec<uint32_t>& projectionBiasDimensions,
                  const std::vector<float>& projectionBiasValue,
                  const hidl_vec<uint32_t>& outputStateInDimensions,
                  const std::vector<float>& outputStateInValue,
                  const hidl_vec<uint32_t>& cellStateInDimensions,
                  const std::vector<float>& cellStateInValue,
                  const hidl_vec<uint32_t>& activationFunctionDimensions,
                  const std::vector<int32_t>& activationFunctionValue,
                  const hidl_vec<uint32_t>& cellClippingThresholdDimensions,
                  const std::vector<float>& cellClippingThresholdValue,
                  const hidl_vec<uint32_t>& projectionClippingThresholdDimensions,
                  const std::vector<float>& projectionClippingThresholdValue,
                  const hidl_vec<uint32_t>& scratchBufferDimensions,
                  const std::vector<float>& scratchBufferValue,
                  const hidl_vec<uint32_t>& outputStateOutDimensions,
                  const std::vector<float>& outputStateOutValue,
                  const hidl_vec<uint32_t>& cellStateOutDimensions,
                  const std::vector<float>& cellStateOutValue,
                  const hidl_vec<uint32_t>& outputDimensions,
                  const std::vector<float>& outputValue,
                  armnn::Compute compute)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
    HalPolicy::Model model = {};

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    AddInputOperand<HalPolicy>(model, inputDimensions);

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    AddTensorOperand<HalPolicy>(model,
                                inputToInputWeightsDimensions,
                                inputToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputToInputWeightsDimensions));
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToInputWeightsDimensions,
                                recurrentToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToInputWeightsDimensions,
                                cellToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToInputWeightsDimensions));
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToForgetWeightsDimensions,
                                cellToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToForgetWeightsDimensions));
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToOutputWeightsDimensions,
                                cellToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToOutputWeightsDimensions));
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                inputGateBiasDimensions,
                                inputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputGateBiasDimensions));
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, forgetGateBiasDimensions, forgetGateBiasValue);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, cellBiasDimensions, cellBiasValue);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, outputGateBiasDimensions, outputGateBiasValue);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    AddTensorOperand<HalPolicy>(model,
                                projectionWeightsDimensions,
                                projectionWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionWeightsDimensions));
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    AddTensorOperand<HalPolicy>(model,
                                projectionBiasDimensions,
                                projectionBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionBiasDimensions));

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddInputOperand<HalPolicy>(model, outputStateInDimensions);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddInputOperand<HalPolicy>(model, cellStateInDimensions);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    AddTensorOperand<HalPolicy>(model,
                                activationFunctionDimensions,
                                activationFunctionValue,
                                HalPolicy::OperandType::INT32);
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                cellClippingThresholdDimensions,
                                cellClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                projectionClippingThresholdDimensions,
                                projectionClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    AddOutputOperand<HalPolicy>(model, scratchBufferDimensions);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddOutputOperand<HalPolicy>(model, outputStateOutDimensions);
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddOutputOperand<HalPolicy>(model, cellStateOutDimensions);
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    AddOutputOperand<HalPolicy>(model, outputDimensions);

    // make the lstm operation
    // The index lists below refer to the operands added above, in order 0..26.
    model.operations.resize(1);
    model.operations[0].type = HalPolicy::OperationType::LSTM;
    model.operations[0].inputs =
        hidl_vec<uint32_t> {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
    model.operations[0].outputs = hidl_vec<uint32_t> {23, 24, 25, 26};

    // define the input values
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    inputArguments[0] = CreateRequestArgument<float>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<float>(outputStateInValue, 1);
    inputArguments[2] = CreateRequestArgument<float>(cellStateInValue, 2);

    // define the expected output values
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(4);

    outputArguments[0] = CreateRequestArgument<float>(scratchBufferValue, 3);
    outputArguments[1] = CreateRequestArgument<float>(outputStateOutValue, 4);
    outputArguments[2] = CreateRequestArgument<float>(cellStateOutValue, 5);
    outputArguments[3] = CreateRequestArgument<float>(outputValue, 6);

    Request request = {};
    request.inputs = outputArguments.size() == 0 ? inputArguments : inputArguments;
    request.inputs = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    // Pools 0-2 match the pool indices used for the input arguments above.
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(outputStateInValue.size(), request, outputStateInValue.data());
    AddPoolAndSetData(cellStateInValue.size(), request, cellStateInValue.data());

    // add memory for the outputs
    // Pool 3 backs the scratch buffer; its returned memory is deliberately ignored
    // because the scratch buffer contents are not compared below.
    AddPoolAndGetData(scratchBufferValue.size(), request);
    android::sp<IMemory> outputStateOutMemory = AddPoolAndGetData(outputStateOutValue.size(), request);
    float* outputStateOutData = static_cast<float*>(static_cast<void*>(outputStateOutMemory->getPointer()));
    android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData(cellStateOutValue.size(), request);
    float* cellStateOutData = static_cast<float*>(static_cast<void*>(cellStateOutMemory->getPointer()));
    android::sp<IMemory> outputMemory = AddPoolAndGetData(outputValue.size(), request);
    float* outputData = static_cast<float*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, *driver);
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }

    // check the results
    // TolerantCompareEqual is used instead of Boost's per-test tolerance (see its comment above).
    for (size_t i = 0; i < outputStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
                   "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
    }
    for (size_t i = 0; i < cellStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]),
                   "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
    }
    for (size_t i = 0; i < outputValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
    }
}
322
// Runs the basic LSTM configuration (input gate enabled, no peephole connections,
// no projection layer) through LstmTestImpl on the given compute backend.
void LstmNoCifgNoPeepholeNoProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).

    uint32_t batchSize = 1;
    uint32_t inputSize = 2;
    uint32_t numUnits = 4;
    uint32_t outputSize = numUnits;   // no projection layer, so output_size == num_units

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{2.0f, 3.0f};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToInputWeightsValue{-0.45018822f, -0.02338299f,
                                                -0.08705890f, -0.34550029f,
                                                 0.04266912f, -0.15680569f,
                                                -0.34856534f,  0.43890524f};
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{ 0.09701663f,  0.20334584f,
                                                 -0.50592935f, -0.31343272f,
                                                 -0.40032279f,  0.44781327f,
                                                  0.01387155f, -0.35593212f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.50013041f,  0.13702840f,
                                                0.11810488f,  0.20131630f,
                                               -0.20583314f,  0.44344562f,
                                                0.22077113f, -0.29909778f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{-0.25065863f, -0.28290087f,
                                                  0.04613829f,  0.40525138f,
                                                  0.44272184f,  0.03897077f,
                                                 -0.15568960f,  0.19487578f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToInputWeightsValue{-0.00635350f, -0.20423880f,  0.31454784f, -0.35746509f,
                                                     0.28902304f,  0.08183324f, -0.16555229f,  0.02286911f,
                                                    -0.13566875f,  0.03034258f,  0.48091322f, -0.12528998f,
                                                     0.24077177f, -0.51332325f, -0.33502164f,  0.10629296f};
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.48684245f, -0.06655136f,  0.42224967f,  0.21126390f,
                                                      0.27654213f,  0.20864892f, -0.07646349f,  0.45877004f,
                                                      0.00141793f, -0.14609534f,  0.36447752f,  0.09196436f,
                                                      0.28053468f,  0.01560611f, -0.20127171f, -0.01140004f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{-0.34074140f,  0.24443203f, -0.20785320f,  0.26320225f,
                                                    0.05695659f, -0.00123841f, -0.47447860f, -0.35869038f,
                                                   -0.06418842f, -0.13502428f, -0.50176400f,  0.22830659f,
                                                   -0.46367589f,  0.26016325f, -0.03894562f, -0.16368064f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{ 0.43385774f, -0.17194885f,  0.27182370f,  0.09215671f,
                                                      0.24107647f, -0.39835793f,  0.18212086f,  0.01301402f,
                                                      0.48572797f, -0.50656658f,  0.20047462f, -0.20607421f,
                                                     -0.51818722f, -0.15390486f,  0.04681480f,  0.39922136f};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    // No peephole connections in this test, so the cell-to-gate weights are all absent
    // (dim {0} makes CreateNoValueLifeTime mark them as NO_VALUE).
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<float> cellToForgetWeightsValue;
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<float> cellToOutputWeightsValue;
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
    std::vector<float> inputGateBiasValue(numUnits, 0.0f);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue(numUnits, 0.0f);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue(numUnits, 0.0f);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    hidl_vec<uint32_t> projectionWeightsDimensions{0};
    std::vector<float> projectionWeightsValue;
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<float> projectionBiasValue;

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4};
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f};

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
    // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //           android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //           tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
    std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<float> outputStateOutValue {-0.0297319f, 0.122947f, 0.208851f, -0.153588f};
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<float> cellStateOutValue {-0.145439f, 0.157475f, 0.293663f, -0.277353f};
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<float> outputValue {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f};

    // The dimension/value pairs below must stay in the same order as LstmTestImpl's parameters.
    LstmTestImpl(inputDimensions, inputValue,
                 inputToInputWeightsDimensions, inputToInputWeightsValue,
                 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                 inputToCellWeightsDimensions, inputToCellWeightsValue,
                 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                 cellToInputWeightsDimensions, cellToInputWeightsValue,
                 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                 inputGateBiasDimensions, inputGateBiasValue,
                 forgetGateBiasDimensions, forgetGateBiasValue,
                 cellBiasDimensions, cellBiasValue,
                 outputGateBiasDimensions, outputGateBiasValue,
                 projectionWeightsDimensions, projectionWeightsValue,
                 projectionBiasDimensions, projectionBiasValue,
                 outputStateInDimensions, outputStateInValue,
                 cellStateInDimensions, cellStateInValue,
                 activationFunctionDimensions, activationFunctionValue,
                 cellClippingThresholdDimensions, cellClippingThresholdValue,
                 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
                 scratchBufferDimensions, scratchBufferValue,
                 outputStateOutDimensions, outputStateOutValue,
                 cellStateOutDimensions, cellStateOutValue,
                 outputDimensions, outputValue,
                 compute);
}
495
// Runs the LSTM layer test with CIFG (coupled input-forget gate) enabled, peephole
// connections enabled and no projection layer, on the given compute backend.
// All weights/biases are passed as constant tensors; expected outputs are the reference
// values from the corresponding NNAPI VTS example (lstm2).
void LstmCifgPeepholeNoProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).

    uint32_t batchSize = 1;
    uint32_t inputSize = 2;
    uint32_t numUnits = 4;
    uint32_t outputSize = numUnits;

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{2.0f, 3.0f};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    //     Empty ({0}) here because CIFG is enabled: the input gate is derived from the forget gate,
    //     so all input-gate tensors (operands 01, 05, 09 and 12) are omitted.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
    std::vector<float> inputToInputWeightsValue;
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
                                                 0.13056988f, -0.36333650f,
                                                -0.22755712f,  0.28253698f,
                                                 0.24407166f,  0.33826375f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
                                               -0.09624726f,  0.05100781f,
                                                0.04717243f,  0.48944736f,
                                               -0.38535351f, -0.17212132f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
                                                 -0.55932593f, -0.09426838f,
                                                 -0.44257352f,  0.54939759f,
                                                  0.01533556f,  0.42751634f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    //     Empty because CIFG is enabled (see operand 01 above).
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
    std::vector<float> recurrentToInputWeightsValue;
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
                                                     -0.14340827f,  0.36986142f,  0.23414481f,  0.55899000f,
                                                      0.10798943f, -0.41174671f,  0.17751795f, -0.34484994f,
                                                     -0.35874045f, -0.11352962f,  0.27268326f,  0.54058349f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
                                                    0.42957711f,  0.01841056f, -0.32764608f, -0.33027974f,
                                                   -0.10826075f,  0.20675004f,  0.19069612f, -0.03026325f,
                                                   -0.54532051f,  0.33003211f,  0.44901288f,  0.21193194f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
                                                     0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
                                                     0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
                                                     0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Empty because CIFG is enabled (see operand 01 above).
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Non-empty: peephole connections are enabled in this test.
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{4};
    std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{4};
    std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Empty because CIFG is enabled (see operand 01 above).
    hidl_vec<uint32_t> inputGateBiasDimensions{0}; // VTS was {4} -> {0} ?
    std::vector<float> inputGateBiasValue;
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue(numUnits, 0.0f);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue(numUnits, 0.0f);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    //     Empty: this test runs without a projection layer (operands 16 and 17 omitted).
    hidl_vec<uint32_t> projectionWeightsDimensions{0};
    std::vector<float> projectionWeightsValue;
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<float> projectionBiasValue;

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4}; // 4 == Tanh (see mapping above)
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f}; // 0.0f => no cell-state clipping
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f}; // irrelevant here: no projection layer

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    //    HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
    //    Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //              android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //              tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    //    This test uses numUnits * 3, matching the implementation's CIFG-enabled behaviour.
    hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
    std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<float> outputStateOutValue{-0.364445f, -0.00352185f, 0.128866f, -0.0516365f};
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<float> cellStateOutValue{-0.760444f, -0.0180416f, 0.182264f, -0.0649371f};
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f};

    // Build the model, execute it on the requested backend and compare against the expected outputs.
    LstmTestImpl(inputDimensions,                       inputValue,
                 inputToInputWeightsDimensions,         inputToInputWeightsValue,
                 inputToForgetWeightsDimensions,        inputToForgetWeightsValue,
                 inputToCellWeightsDimensions,          inputToCellWeightsValue,
                 inputToOutputWeightsDimensions,        inputToOutputWeightsValue,
                 recurrentToInputWeightsDimensions,     recurrentToInputWeightsValue,
                 recurrentToForgetWeightsDimensions,    recurrentToForgetWeightsValue,
                 recurrentToCellWeightsDimensions,      recurrentToCellWeightsValue,
                 recurrentToOutputWeightsDimensions,    recurrentToOutputWeightsValue,
                 cellToInputWeightsDimensions,          cellToInputWeightsValue,
                 cellToForgetWeightsDimensions,         cellToForgetWeightsValue,
                 cellToOutputWeightsDimensions,         cellToOutputWeightsValue,
                 inputGateBiasDimensions,               inputGateBiasValue,
                 forgetGateBiasDimensions,              forgetGateBiasValue,
                 cellBiasDimensions,                    cellBiasValue,
                 outputGateBiasDimensions,              outputGateBiasValue,
                 projectionWeightsDimensions,           projectionWeightsValue,
                 projectionBiasDimensions,              projectionBiasValue,
                 outputStateInDimensions,               outputStateInValue,
                 cellStateInDimensions,                 cellStateInValue,
                 activationFunctionDimensions,          activationFunctionValue,
                 cellClippingThresholdDimensions,       cellClippingThresholdValue,
                 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
                 scratchBufferDimensions,               scratchBufferValue,
                 outputStateOutDimensions,              outputStateOutValue,
                 cellStateOutDimensions,                cellStateOutValue,
                 outputDimensions,                      outputValue,
                 compute);
}
662
Matteo Martincighc7434122018-11-14 12:27:04 +0000663void LstmNoCifgPeepholeProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +0100664{
665 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm3.model.cpp
666 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm3.example.cpp
667 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
668
Matteo Martincighc7434122018-11-14 12:27:04 +0000669 uint32_t batchSize = 2;
670 uint32_t inputSize = 5;
671 uint32_t numUnits = 20;
672 uint32_t outputSize = 16;
673
telsoa01ce3e84a2018-08-31 09:31:35 +0100674 // Inputs:
675 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
676 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +0000677 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
678 std::vector<float> inputValue{0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
679 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100680
681 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
682 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +0000683 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
684 std::vector<float> inputToInputWeightsValue
685 {
686 0.0213936830f, 0.0612455100f, 0.0469051670f, -0.0146576770f, -0.0314946300f,
687 0.0917180300f, 0.1464780100f, 0.1079719300f, -0.0057968358f, 0.0019193048f,
688 -0.2726754000f, 0.1015402900f, -0.0185398850f, 0.0803498850f, -0.1026238500f,
689 -0.0225997870f, -0.0912115500f, -0.0086759670f, -0.0452061030f, -0.0821282000f,
690 -0.0080459520f, 0.0154780810f, 0.0552172470f, 0.0387195870f, 0.0441536270f,
691 -0.0645324300f, 0.0503182500f, -0.0469351080f, -0.0081644309f, 0.0145742260f,
692 -0.1671009000f, -0.1551955200f, -0.1681979700f, -0.1397126900f, -0.1195305900f,
693 0.2500548700f, -0.2279098300f, 0.0098550870f, -0.0281409580f, -0.1120069800f,
694 0.1129540800f, -0.0035217577f, 0.0544850750f, 0.0518469500f, 0.0647112060f,
695 0.1098919300f, 0.1167478600f, 0.0349060700f, 0.0772735700f, 0.1139058500f,
696 -0.1863375000f, -0.1034451000f, -0.1394518900f, -0.0494012270f, -0.1876706300f,
697 0.0424839030f, 0.1423355200f, 0.1383258100f, 0.1835016500f, 0.1454560300f,
698 -0.0285457040f, 0.0249395310f, 0.0509297180f, 0.0076203286f, -0.0029723682f,
699 -0.0424842240f, -0.1182759600f, -0.0917110400f, -0.1080862800f, -0.1632798800f,
700 -0.2273378000f, -0.0993647000f, -0.0171551070f, 0.0023917493f, 0.0492727640f,
701 0.0038534778f, 0.0547645050f, 0.0897537840f, 0.0694723400f, 0.0801447600f,
702 -0.0454423400f, -0.0497073000f, -0.0713563100f, -0.0489291060f, -0.0040420120f,
703 -0.0092840260f, 0.0180420540f, 0.0036860977f, -0.0742730200f, -0.1143460400f,
704 -0.0189954560f, 0.0314875430f, 0.0128349080f, 0.0199777540f, 0.0442566540f,
705 -0.3929261300f, -0.1851933400f, -0.1165128100f, -0.0680989200f, 0.0113736770f
706 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100707 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
708 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000709 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
710 std::vector<float> inputToForgetWeightsValue
711 {
712 -0.0018401089f, -0.0048522370f, 0.0369842400f, 0.0141817040f, 0.0282732360f,
713 -0.0167261940f, -0.0524975900f, -0.1020426100f, 0.0086106600f, -0.0409795050f,
714 -0.0098991870f, 0.0192389200f, -0.0281772690f, -0.0853510300f, -0.1458549500f,
715 0.1066256700f, -0.0190973100f, -0.0178835340f, -0.0047269356f, -0.0451033230f,
716 0.0030784295f, 0.0767847750f, 0.0746369600f, 0.0945313950f, 0.0814421000f,
717 -0.1225789900f, -0.0339457580f, -0.0313034650f, 0.0456306260f, 0.0684388700f,
718 -0.1349294500f, -0.0124800070f, -0.0811829000f, -0.0722449900f, -0.0962879100f,
719 0.0451009460f, 0.0012300825f, 0.0139646620f, 0.0993723940f, 0.0254305900f,
720 0.0695832400f, 0.0342572960f, 0.0482646000f, 0.0626799700f, 0.0526250680f,
721 0.1278466600f, 0.0707789700f, 0.0257259350f, 0.0416500900f, 0.0724190500f,
722 0.0186686440f, -0.0373772940f, -0.0627778300f, -0.0883363600f, -0.0401206050f,
723 -0.0114055860f, -0.0078083350f, -0.0103013860f, -0.0051021670f, 0.0277174640f,
724 0.0548342300f, 0.1144911100f, 0.1128965200f, 0.1093983900f, 0.1339650600f,
725 -0.0840216600f, -0.0190146200f, -0.0446783040f, -0.0772056500f, 0.0143500630f,
726 -0.1175795800f, -0.0652038000f, -0.0818573300f, -0.0767543240f, -0.0926143750f,
727 0.1040549100f, 0.0529603360f, 0.0357558950f, 0.0358393860f, -0.0125405530f,
728 0.0368812980f, 0.0291337600f, 0.0342015900f, 0.0544844700f, -0.0545233530f,
729 0.0258271500f, 0.0232735500f, -0.0118571790f, -0.0011980024f, -0.0346417170f,
730 -0.0261250940f, -0.1758261500f, -0.1592365700f, -0.2748677400f, -0.0006143371f,
731 0.0001771948f, -8.470171e-05f, 0.0265180700f, 0.0457907650f, 0.069564960f
732 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100733 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000734 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
735 std::vector<float> inputToCellWeightsValue
736 {
737 -0.0458028300f, -0.0954946200f, -0.0324189850f, -0.0645463300f, -0.0435284530f,
738 0.0430185870f, -0.0491523440f, -0.1241814400f, -0.0789854750f, -0.0759688900f,
739 0.0194843620f, -0.1143496200f, -0.0074034138f, -0.0631484400f, -0.0929814950f,
740 0.0062155537f, -0.0250343380f, -0.0028890965f, 0.0489295270f, 0.0623507500f,
741 0.1066591800f, -0.0320367920f, -0.0850591600f, -0.1084335800f, -0.1300243300f,
742 -0.0368164370f, -0.0213013400f, -0.0165182390f, 0.0047691227f, -0.0025825808f,
743 0.0660178660f, 0.0299915340f, -0.1065283600f, -0.1037554000f, -0.1305607100f,
744 -0.0326664300f, -0.0337024140f, -0.0064734240f, -0.0461169200f, 0.0144193390f,
745 -0.0251743230f, 0.0396852000f, 0.0817775060f, 0.0615746800f, 0.1021009500f,
746 -0.0096581940f, 0.0465117170f, 0.0360390600f, 0.0069369148f, 0.0159600950f,
747 -0.0650766600f, 0.0955159800f, 0.0535688360f, 0.0640871400f, 0.1283566700f,
748 -0.0087143290f, -0.2021196600f, -0.1209367400f, 0.0294504720f, 0.2849013000f,
749 -0.0292279010f, 0.1164364000f, -0.0856026300f, 0.0994178600f, -0.0369995650f,
750 -0.0288426260f, -0.0033637602f, -0.0170129020f, -0.0972086500f, -0.1119335100f,
751 -0.0291551170f, -0.0179360340f, -0.0097689360f, -0.0422332400f, -0.0361596350f,
752 0.0650511200f, -0.0217428920f, -0.0233772120f, -0.0722136400f, -0.0643055200f,
753 0.0545386500f, 0.0911498140f, 0.0638733100f, 0.0075183930f, 0.0559609530f,
754 0.0697793440f, 0.0464111680f, 0.1050991100f, 0.0746389400f, 0.0075130584f,
755 0.0128509820f, 0.0455543100f, 0.0569556880f, 0.0655528500f, 0.0508014560f,
756 -0.0098626830f, 0.0082677200f, -0.0265556090f, -0.0073611983f, -0.0014897042f
757 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100758 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
759 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000760 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
761 std::vector<float> inputToOutputWeightsValue
762 {
763 -0.0998932000f, -0.0720195600f, -0.0528037730f, -0.1562959300f, -0.1500191800f,
764 -0.0765075100f, 0.0235985500f, -0.0751553550f, -0.0803770900f, -0.1509353400f,
765 0.0295175520f, -0.0475139300f, 0.0103505310f, -0.0266485100f, -0.0168397220f,
766 -0.0231211630f, 0.0077019283f, 0.0128512570f, -0.0504064900f, -0.0129761000f,
767 -0.0217377470f, -0.0383057930f, -0.0687058600f, -0.0148124700f, -0.0012853940f,
768 0.1012423600f, 0.0831228350f, 0.0533130060f, -0.0622356460f, -0.0756371540f,
769 -0.0278339030f, 0.0297749710f, 0.1130802000f, 0.0921890600f, 0.0950613500f,
770 -0.0866657640f, -0.0371627060f, -0.0388809140f, -0.0358328450f, -0.0144815640f,
771 -0.0982500300f, -0.1204856900f, -0.0976655860f, -0.0528763300f, -0.0964047000f,
772 -0.1136642900f, 0.0357775050f, 0.1356881900f, 0.0524513830f, 0.0506493040f,
773 0.0579895100f, -0.0218523350f, -0.0998488440f, 0.0147404750f, -0.0788979460f,
774 0.0497469900f, 0.0141604730f, 0.0697393200f, 0.0496494200f, 0.0333646460f,
775 0.0819012400f, 0.0255353670f, 0.0508931650f, 0.0485142540f, 0.0694581300f,
776 -0.0789075640f, -0.0670761600f, -0.1184450800f, -0.0998668800f, -0.0750940300f,
777 0.0626322600f, 0.1492558700f, 0.2018843600f, 0.1209845100f, 0.1463941500f,
778 0.0015017595f, -0.0142673820f, -0.0341725700f, 0.0127114680f, 0.0028300495f,
779 -0.0247584820f, -0.0509854800f, -0.0821182000f, 0.0142256720f, 0.0215441580f,
780 0.0894972500f, 0.0750526800f, -0.0020780868f, 0.0490825800f, 0.0647629500f,
781 -0.0229070630f, 0.0275624560f, 0.0401857350f, 0.0195675770f, -0.0155987390f,
782 -0.0490973030f, -0.0171218660f, -0.0833682340f, -0.0233200200f, -0.084095600f
783 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100784 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
785 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
786 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +0000787 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
788 std::vector<float> recurrentToInputWeightsValue
789 {
telsoa01ce3e84a2018-08-31 09:31:35 +0100790 -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f, // 00
791 -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
792 -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
793 -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000794 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f, // 01
795 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100796 -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000797 0.14283475f, -0.07390571f, -0.06402044f, 0.062524505f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100798 -0.093129106f, 0.04860203f, -0.08364217f, -0.08119002f, // 02
Matteo Martincighc7434122018-11-14 12:27:04 +0000799 0.009352075f, 0.22920375f, 0.0016303885f, 0.11583097f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100800 -0.13732095f, 0.012405723f, -0.07551853f, 0.06343048f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000801 0.12162708f, -0.031923793f, -0.014335606f, 0.01790974f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100802 -0.10650317f, -0.0724401f, 0.08554849f, -0.05727212f, // 03
Matteo Martincighc7434122018-11-14 12:27:04 +0000803 0.06556731f, -0.042729504f, -0.043227166f, 0.011683251f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100804 -0.013082158f, -0.029302018f, -0.010899579f, -0.062036745f,
805 -0.022509435f, -0.00964907f, -0.01567329f, 0.04260106f,
806 -0.07787477f, -0.11576462f, 0.017356863f, 0.048673786f, // 04
807 -0.017577527f, -0.05527947f, -0.082487635f, -0.040137455f,
808 -0.10820036f, -0.04666372f, 0.022746278f, -0.07851417f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000809 0.01068115f, 0.032956902f, 0.022433773f, 0.0026891115f,
810 0.08944216f, -0.0685835f, 0.010513544f, 0.07228705f, // 05
811 0.02032331f, -0.059686817f, -0.0005566496f, -0.086984694f,
812 0.040414046f, -0.1380399f, 0.094208956f, -0.05722982f,
813 0.012092817f, -0.04989123f, -0.086576f, -0.003399834f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100814 -0.04696032f, -0.045747425f, 0.10091314f, 0.048676282f, // 06
815 -0.029037097f, 0.031399418f, -0.0040285117f, 0.047237843f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000816 0.09504992f, 0.041799378f, -0.049185462f, -0.031518843f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100817 -0.10516937f, 0.026374253f, 0.10058866f, -0.0033195973f,
818 -0.041975245f, 0.0073591834f, 0.0033782164f, -0.004325073f, // 07
819 -0.10167381f, 0.042500053f, -0.01447153f, 0.06464186f,
820 -0.017142897f, 0.03312627f, 0.009205989f, 0.024138335f,
821 -0.011337001f, 0.035530265f, -0.010912711f, 0.0706555f,
822 -0.005894094f, 0.051841937f, -0.1401738f, -0.02351249f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +0000823 0.0365468f, 0.07590991f, 0.08838724f, 0.021681072f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100824 -0.10086113f, 0.019608743f, -0.06195883f, 0.077335775f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000825 0.023646897f, -0.095322326f, 0.02233014f, 0.09756986f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100826 -0.048691444f, -0.009579111f, 0.07595467f, 0.11480546f, // 09
827 -0.09801813f, 0.019894179f, 0.08502348f, 0.004032281f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000828 0.037211012f, 0.068537936f, -0.048005626f, -0.091520436f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100829 -0.028379958f, -0.01556313f, 0.06554592f, -0.045599163f,
830 -0.01672207f, -0.020169014f, -0.011877351f, -0.20212261f, // 10
Matteo Martincighc7434122018-11-14 12:27:04 +0000831 0.010889619f, 0.0047078193f, 0.038385306f, 0.08540671f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100832 -0.017140968f, -0.0035865551f, 0.016678626f, 0.005633034f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000833 0.015963363f, 0.00871737f, 0.060130805f, 0.028611384f,
834 0.10109069f, -0.015060172f, -0.07894427f, 0.06401885f, // 11
835 0.011584063f, -0.024466386f, 0.0047652307f, -0.09041358f,
836 0.030737216f, -0.0046374933f, 0.14215417f, -0.11823516f,
837 0.019899689f, 0.006106124f, -0.027092824f, 0.0786356f,
838 0.05052217f, -0.058925f, -0.011402121f, -0.024987547f, // 12
telsoa01ce3e84a2018-08-31 09:31:35 +0100839 -0.0013661642f, -0.06832946f, -0.015667673f, -0.1083353f,
840 -0.00096863037f, -0.06988685f, -0.053350925f, -0.027275559f,
841 -0.033664223f, -0.07978348f, -0.025200296f, -0.017207067f,
842 -0.058403496f, -0.055697463f, 0.005798788f, 0.12965427f, // 13
843 -0.062582195f, 0.0013350133f, -0.10482091f, 0.0379771f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000844 0.072521195f, -0.0029455067f, -0.13797039f, -0.03628521f,
845 0.013806405f, -0.017858358f, -0.01008298f, -0.07700066f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100846 -0.017081132f, 0.019358726f, 0.0027079724f, 0.004635139f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +0000847 0.062634714f, -0.02338735f, -0.039547626f, -0.02050681f,
848 0.03385117f, -0.083611414f, 0.002862572f, -0.09421313f,
849 0.058618143f, -0.08598433f, 0.00972939f, 0.023867095f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100850 -0.053934585f, -0.023203006f, 0.07452513f, -0.048767887f, // 15
851 -0.07314807f, -0.056307215f, -0.10433547f, -0.06440842f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000852 0.04328182f, 0.04389765f, -0.020006588f, -0.09076438f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100853 -0.11652589f, -0.021705797f, 0.03345259f, -0.010329105f,
854 -0.025767034f, 0.013057034f, -0.07316461f, -0.10145612f, // 16
Matteo Martincighc7434122018-11-14 12:27:04 +0000855 0.06358255f, 0.18531723f, 0.07759293f, 0.12006465f,
856 0.1305557f, 0.058638252f, -0.03393652f, 0.09622831f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100857 -0.16253184f, -2.4580743e-06f, 0.079869635f, -0.070196845f,
858 -0.005644518f, 0.06857898f, -0.12598175f, -0.035084512f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +0000859 0.03156317f, -0.12794146f, -0.031963028f, 0.04692781f,
860 0.030070418f, 0.0071660685f, -0.095516115f, -0.004643372f,
861 0.040170413f, -0.062104587f, -0.0037324072f, 0.0554317f,
862 0.08184801f, -0.019164372f, 0.06791302f, 0.034257166f, // 18
telsoa01ce3e84a2018-08-31 09:31:35 +0100863 -0.10307039f, 0.021943003f, 0.046745934f, 0.0790918f,
864 -0.0265588f, -0.007824208f, 0.042546265f, -0.00977924f,
865 -0.0002440307f, -0.017384544f, -0.017990116f, 0.12252321f,
866 -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f, // 19
Matteo Martincighc7434122018-11-14 12:27:04 +0000867 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100868 -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000869 -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f
870 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100871 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
872 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000873 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
874 std::vector<float> recurrentToForgetWeightsValue
875 {
telsoa01ce3e84a2018-08-31 09:31:35 +0100876 -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f, // 00
Matteo Martincighc7434122018-11-14 12:27:04 +0000877 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100878 -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000879 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
880 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f, // 01
telsoa01ce3e84a2018-08-31 09:31:35 +0100881 -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
882 -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000883 0.061878487f, -0.04729229f, 0.034919553f, -0.07585433f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100884 -0.04421272f, -0.044019096f, 0.085488975f, 0.04058006f, // 02
885 -0.06890133f, -0.030951202f, -0.024628663f, -0.07672815f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000886 0.034293607f, 0.08556707f, -0.05293577f, -0.033561368f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100887 -0.04899627f, 0.0241671f, 0.015736353f, -0.095442444f,
888 -0.029564252f, 0.016493602f, -0.035026584f, 0.022337519f, // 03
889 -0.026871363f, 0.004780428f, 0.0077918363f, -0.03601621f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000890 0.016435321f, -0.03263031f, -0.09543275f, -0.047392778f,
891 0.013454138f, 0.028934088f, 0.01685226f, -0.086110644f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100892 -0.046250615f, -0.01847454f, 0.047608484f, 0.07339695f, // 04
Matteo Martincighc7434122018-11-14 12:27:04 +0000893 0.034546845f, -0.04881143f, 0.009128804f, -0.08802852f,
894 0.03761666f, 0.008096139f, -0.014454086f, 0.014361001f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100895 -0.023502491f, -0.0011840804f, -0.07607001f, 0.001856849f,
896 -0.06509276f, -0.006021153f, -0.08570962f, -0.1451793f, // 05
Matteo Martincighc7434122018-11-14 12:27:04 +0000897 0.060212336f, 0.055259194f, 0.06974018f, 0.049454916f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100898 -0.027794661f, -0.08077226f, -0.016179763f, 0.1169753f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000899 0.17213494f, -0.0056326236f, -0.053934924f, -0.0124349f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100900 -0.11520337f, 0.05409887f, 0.088759385f, 0.0019655675f, // 06
Matteo Martincighc7434122018-11-14 12:27:04 +0000901 0.0042065294f, 0.03881498f, 0.019844765f, 0.041858196f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100902 -0.05695512f, 0.047233116f, 0.038937137f, -0.06542224f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000903 0.014429736f, -0.09719407f, 0.13908425f, -0.05379757f,
904 0.012321099f, 0.082840554f, -0.029899208f, 0.044217527f, // 07
905 0.059855383f, 0.07711018f, -0.045319796f, 0.0948846f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100906 -0.011724666f, -0.0033288454f, -0.033542685f, -0.04764985f,
907 -0.13873616f, 0.040668588f, 0.034832682f, -0.015319203f,
908 -0.018715994f, 0.046002675f, 0.0599172f, -0.043107376f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +0000909 0.0294216f, -0.002314414f, -0.022424703f, 0.0030315618f,
910 0.0014641669f, 0.0029166266f, -0.11878115f, 0.013738511f,
911 0.12375372f, -0.0006038222f, 0.029104086f, 0.087442465f,
912 0.052958444f, 0.07558703f, 0.04817258f, 0.044462286f, // 09
telsoa01ce3e84a2018-08-31 09:31:35 +0100913 -0.015213451f, -0.08783778f, -0.0561384f, -0.003008196f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000914 0.047060397f, -0.002058388f, 0.03429439f, -0.018839769f,
915 0.024734668f, 0.024614193f, -0.042046934f, 0.09597743f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100916 -0.0043254104f, 0.04320769f, 0.0064070094f, -0.0019131786f, // 10
917 -0.02558259f, -0.022822596f, -0.023273505f, -0.02464396f,
918 -0.10991725f, -0.006240552f, 0.0074488563f, 0.024044557f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000919 0.04383914f, -0.046476185f, 0.028658995f, 0.060410924f,
920 0.050786525f, 0.009452605f, -0.0073054377f, -0.024810238f, // 11
921 0.0052906186f, 0.0066939713f, -0.0020913032f, 0.014515517f,
922 0.015898481f, 0.021362653f, -0.030262267f, 0.016587038f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100923 -0.011442813f, 0.041154444f, -0.007631438f, -0.03423484f,
924 -0.010977775f, 0.036152758f, 0.0066366293f, 0.11915515f, // 12
Matteo Martincighc7434122018-11-14 12:27:04 +0000925 0.02318443f, -0.041350313f, 0.021485701f, -0.10906167f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100926 -0.028218046f, -0.00954771f, 0.020531068f, -0.11995105f,
927 -0.03672871f, 0.024019798f, 0.014255957f, -0.05221243f,
928 -0.00661567f, -0.04630967f, 0.033188973f, 0.10107534f, // 13
929 -0.014027541f, 0.030796422f, -0.10270911f, -0.035999842f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000930 0.15443139f, 0.07684145f, 0.036571592f, -0.035900835f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100931 -0.0034699554f, 0.06209149f, 0.015920248f, -0.031122351f,
932 -0.03858649f, 0.01849943f, 0.13872518f, 0.01503974f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +0000933 0.069941424f, -0.06948533f, -0.0088794185f, 0.061282158f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100934 -0.047401894f, 0.03100163f, -0.041533746f, -0.10430945f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000935 0.044574402f, -0.01425562f, -0.024290353f, 0.034563623f,
936 0.05866852f, 0.023947537f, -0.09445152f, 0.035450947f, // 15
937 0.02247216f, -0.0042998926f, 0.061146557f, -0.10250651f,
938 0.020881841f, -0.06747029f, 0.10062043f, -0.0023941975f,
939 0.03532124f, -0.016341697f, 0.09685456f, -0.016764693f,
940 0.051808182f, 0.05875331f, -0.04536488f, 0.001626336f, // 16
telsoa01ce3e84a2018-08-31 09:31:35 +0100941 -0.028892258f, -0.01048663f, -0.009793449f, -0.017093895f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000942 0.010987891f, 0.02357273f, -0.00010856845f, 0.0099760275f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100943 -0.001845119f, -0.03551521f, 0.0018358806f, 0.05763657f,
944 -0.01769146f, 0.040995963f, 0.02235177f, -0.060430344f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +0000945 0.11475477f, -0.023854522f, 0.10071741f, 0.0686208f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100946 -0.014250481f, 0.034261297f, 0.047418304f, 0.08562733f,
947 -0.030519066f, 0.0060542435f, 0.014653856f, -0.038836084f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000948 0.04096551f, 0.032249358f, -0.08355519f, -0.026823482f, // 18
949 0.056386515f, -0.010401743f, -0.028396193f, 0.08507674f,
950 0.014410365f, 0.020995233f, 0.17040324f, 0.11511526f,
951 0.02459721f, 0.0066619175f, 0.025853224f, -0.023133837f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100952 -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f, // 19
953 -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000954 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f,
955 -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f
956 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100957 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
958 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000959 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
960 std::vector<float> recurrentToCellWeightsValue
961 {
telsoa01ce3e84a2018-08-31 09:31:35 +0100962 -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000963 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
964 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100965 -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000966 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
967 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100968 -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
969 -0.019443132f, -0.030755889f, -0.0040000007f, 0.04465846f,
970 -0.021585021f, 0.0031670958f, 0.0053199246f, -0.056117613f,
971 -0.10893326f, 0.076739706f, -0.08509834f, -0.027997585f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000972 0.037871376f, 0.01449768f, -0.09002357f, -0.06111149f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100973 -0.046195522f, 0.0422062f, -0.005683705f, -0.1253618f,
974 -0.012925729f, -0.04890792f, 0.06985068f, 0.037654128f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000975 0.03398274f, -0.004781977f, 0.007032333f, -0.031787455f,
976 0.010868644f, -0.031489216f, 0.09525667f, 0.013939797f,
977 0.0058680447f, 0.0167067f, 0.02668468f, -0.04797466f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100978 -0.048885044f, -0.12722108f, 0.035304096f, 0.06554885f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000979 0.00972396f, -0.039238118f, -0.05159735f, -0.11329045f,
980 0.1613692f, -0.03750952f, 0.06529313f, -0.071974665f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100981 -0.11769596f, 0.015524369f, -0.0013754242f, -0.12446318f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000982 0.02786344f, -0.014179351f, 0.005264273f, 0.14376344f,
983 0.015983658f, 0.03406988f, -0.06939408f, 0.040699873f,
984 0.02111075f, 0.09669095f, 0.041345075f, -0.08316494f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100985 -0.07684199f, -0.045768797f, 0.032298047f, -0.041805092f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000986 0.0119405f, 0.0061010392f, 0.12652606f, 0.0064572375f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100987 -0.024950314f, 0.11574242f, 0.04508852f, -0.04335324f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000988 0.06760663f, -0.027437469f, 0.07216407f, 0.06977076f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100989 -0.05438599f, 0.034033038f, -0.028602652f, 0.05346137f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000990 0.043184172f, -0.037189785f, 0.10420091f, 0.00882477f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100991 -0.054019816f, -0.074273005f, -0.030617684f, -0.0028467078f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000992 0.024302477f, -0.0038869337f, 0.005332455f, 0.0013399826f,
993 0.04361412f, -0.007001822f, 0.09631092f, -0.06702025f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100994 -0.042049985f, -0.035070654f, -0.04103342f, -0.10273396f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000995 0.0544271f, 0.037184782f, -0.13150354f, -0.0058036847f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100996 -0.008264958f, 0.042035464f, 0.05891794f, 0.029673764f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000997 0.0063542654f, 0.044788733f, 0.054816857f, 0.062257513f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100998 -0.00093483756f, 0.048938446f, -0.004952862f, -0.007730018f,
999 -0.04043371f, -0.017094059f, 0.07229206f, -0.023670016f,
1000 -0.052195564f, -0.025616996f, -0.01520939f, 0.045104615f,
1001 -0.007376126f, 0.003533447f, 0.006570588f, 0.056037236f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001002 0.12436656f, 0.051817212f, 0.028532185f, -0.08686856f,
1003 0.11868599f, 0.07663395f, -0.07323171f, 0.03463402f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001004 -0.050708205f, -0.04458982f, -0.11590894f, 0.021273347f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001005 0.1251325f, -0.15313013f, -0.12224372f, 0.17228661f,
1006 0.023029093f, 0.086124025f, 0.006445803f, -0.03496501f,
1007 0.028332196f, 0.04449512f, -0.042436164f, -0.026587414f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001008 -0.006041347f, -0.09292539f, -0.05678812f, 0.03897832f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001009 0.09465633f, 0.008115513f, -0.02171956f, 0.08304309f,
1010 0.071401566f, 0.019622514f, 0.032163795f, -0.004167056f,
1011 0.02295182f, 0.030739572f, 0.056506045f, 0.004612461f,
1012 0.06524936f, 0.059999723f, 0.046395954f, -0.0045512207f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001013 -0.1335546f, -0.030136576f, 0.11584653f, -0.014678886f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001014 0.0020118146f, -0.09688814f, -0.0790206f, 0.039770417f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001015 -0.0329582f, 0.07922767f, 0.029322514f, 0.026405897f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001016 0.04207835f, -0.07073373f, 0.063781224f, 0.0859677f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001017 -0.10925287f, -0.07011058f, 0.048005477f, 0.03438226f,
1018 -0.09606514f, -0.006669445f, -0.043381985f, 0.04240257f,
1019 -0.06955775f, -0.06769346f, 0.043903265f, -0.026784198f,
1020 -0.017840602f, 0.024307009f, -0.040079936f, -0.019946516f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001021 0.045318738f, -0.12233574f, 0.026170589f, 0.0074471775f,
1022 0.15978073f, 0.10185836f, 0.10298046f, -0.015476589f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001023 -0.039390966f, -0.072174534f, 0.0739445f, -0.1211869f,
1024 -0.0347889f, -0.07943156f, 0.014809798f, -0.12412325f,
1025 -0.0030663363f, 0.039695457f, 0.0647603f, -0.08291318f,
1026 -0.018529687f, -0.004423833f, 0.0037507233f, 0.084633216f,
1027 -0.01514876f, -0.056505352f, -0.012800942f, -0.06994386f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001028 0.012962922f, -0.031234352f, 0.07029052f, 0.016418684f,
1029 0.03618972f, 0.055686004f, -0.08663945f, -0.017404709f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001030 -0.054761406f, 0.029065743f, 0.052404847f, 0.020238016f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001031 0.0048197987f, -0.0214882f, 0.07078733f, 0.013016777f,
1032 0.06262858f, 0.009184685f, 0.020785125f, -0.043904778f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001033 -0.0270329f, -0.03299152f, -0.060088247f, -0.015162964f,
1034 -0.001828936f, 0.12642565f, -0.056757294f, 0.013586685f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001035 0.09232601f, -0.035886683f, 0.06000002f, 0.05229691f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001036 -0.052580316f, -0.082029596f, -0.010794592f, 0.012947712f,
1037 -0.036429964f, -0.085508935f, -0.13127148f, -0.017744139f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001038 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001039 -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f,
1040 -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001041 -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f
1042 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001043 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1044 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001045 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1046 std::vector<float> recurrentToOutputWeightsValue
1047 {
1048 0.025825322f, -0.05813119f, 0.09495884f, -0.045984812f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001049 -0.01255415f, -0.0026479573f, -0.08196161f, -0.054914974f,
1050 -0.0046604523f, -0.029587349f, -0.044576716f, -0.07480124f,
1051 -0.082868785f, 0.023254942f, 0.027502948f, -0.0039728214f,
1052 -0.08683098f, -0.08116779f, -0.014675607f, -0.037924774f,
1053 -0.023314456f, -0.007401714f, -0.09255757f, 0.029460307f,
1054 -0.08829125f, -0.005139627f, -0.08989442f, -0.0555066f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001055 0.13596267f, -0.025062224f, -0.048351806f, -0.03850004f,
1056 0.07266485f, -0.022414139f, 0.05940088f, 0.075114764f,
1057 0.09597592f, -0.010211725f, -0.0049794707f, -0.011523867f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001058 -0.025980417f, 0.072999895f, 0.11091378f, -0.081685916f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001059 0.014416728f, 0.043229222f, 0.034178585f, -0.07530371f,
1060 0.035837382f, -0.085607f, -0.007721233f, -0.03287832f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001061 -0.043848954f, -0.06404588f, -0.06632928f, -0.073643476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001062 0.008214239f, -0.045984086f, 0.039764922f, 0.03474462f,
1063 0.060612556f, -0.080590084f, 0.049127717f, 0.04151091f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001064 -0.030063879f, 0.008801774f, -0.023021035f, -0.019558564f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001065 0.05158114f, -0.010947698f, -0.011825728f, 0.0075720972f,
1066 0.0699727f, -0.0039981045f, 0.069350146f, 0.08799282f,
1067 0.016156472f, 0.035502106f, 0.11695009f, 0.006217345f,
1068 0.13392477f, -0.037875112f, 0.025745004f, 0.08940699f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001069 -0.00924166f, 0.0046702605f, -0.036598757f, -0.08811812f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001070 0.10522024f, -0.032441203f, 0.008176899f, -0.04454919f,
1071 0.07058152f, 0.0067963637f, 0.039206743f, 0.03259838f,
1072 0.03725492f, -0.09515802f, 0.013326398f, -0.052055415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001073 -0.025676316f, 0.03198509f, -0.015951829f, -0.058556724f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001074 0.036879618f, 0.043357447f, 0.028362012f, -0.05908629f,
1075 0.0059240665f, -0.04995891f, -0.019187413f, 0.0276265f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001076 -0.01628143f, 0.0025863599f, 0.08800015f, 0.035250366f,
1077 -0.022165963f, -0.07328642f, -0.009415526f, -0.07455109f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001078 0.11690406f, 0.0363299f, 0.07411125f, 0.042103454f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001079 -0.009660886f, 0.019076364f, 0.018299393f, -0.046004917f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001080 0.08891175f, 0.0431396f, -0.026327137f, -0.051502608f,
1081 0.08979574f, -0.051670972f, 0.04940282f, -0.07491107f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001082 -0.021240504f, 0.022596184f, -0.034280192f, 0.060163025f,
1083 -0.058211457f, -0.051837247f, -0.01349775f, -0.04639988f,
1084 -0.035936575f, -0.011681591f, 0.064818054f, 0.0073146066f,
1085 -0.021745546f, -0.043124277f, -0.06471268f, -0.07053354f,
1086 -0.029321948f, -0.05330136f, 0.016933719f, -0.053782392f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001087 0.13747959f, -0.1361751f, -0.11569455f, 0.0033329215f,
1088 0.05693899f, -0.053219706f, 0.063698f, 0.07977434f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001089 -0.07924483f, 0.06936997f, 0.0034815092f, -0.007305279f,
1090 -0.037325785f, -0.07251102f, -0.033633437f, -0.08677009f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001091 0.091591336f, -0.14165086f, 0.021752775f, 0.019683983f,
1092 0.0011612234f, -0.058154266f, 0.049996935f, 0.0288841f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001093 -0.0024567875f, -0.14345716f, 0.010955264f, -0.10234828f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001094 0.1183656f, -0.0010731248f, -0.023590032f, -0.072285876f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001095 -0.0724771f, -0.026382286f, -0.0014920527f, 0.042667855f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001096 0.0018776858f, 0.02986552f, 0.009814309f, 0.0733756f,
1097 0.12289186f, 0.018043943f, -0.0458958f, 0.049412545f,
1098 0.033632483f, 0.05495232f, 0.036686596f, -0.013781798f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001099 -0.010036754f, 0.02576849f, -0.08307328f, 0.010112348f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001100 0.042521734f, -0.05869831f, -0.071689695f, 0.03876447f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001101 -0.13275425f, -0.0352966f, -0.023077697f, 0.10285965f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001102 0.084736146f, 0.15568255f, -0.00040734606f, 0.027835453f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001103 -0.10292561f, -0.032401145f, 0.10053256f, -0.026142767f,
1104 -0.08271222f, -0.0030240538f, -0.016368777f, 0.1070414f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001105 0.042672627f, 0.013456989f, -0.0437609f, -0.022309763f,
1106 0.11576483f, 0.04108048f, 0.061026827f, -0.0190714f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001107 -0.0869359f, 0.037901703f, 0.0610107f, 0.07202949f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001108 0.01675338f, 0.086139716f, -0.08795751f, -0.014898893f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001109 -0.023771819f, -0.01965048f, 0.007955471f, -0.043740474f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001110 0.03346837f, -0.10549954f, 0.090567775f, 0.042013682f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001111 -0.03176985f, 0.12569028f, -0.02421228f, -0.029526481f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001112 0.023851605f, 0.031539805f, 0.05292009f, -0.02344001f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001113 -0.07811758f, -0.08834428f, 0.10094801f, 0.16594367f,
1114 -0.06861939f, -0.021256343f, -0.041093912f, -0.06669611f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001115 0.035498552f, 0.021757556f, -0.09302526f, -0.015403468f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001116 -0.06614931f, -0.051798206f, -0.013874718f, 0.03630673f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001117 0.010412845f, -0.08077351f, 0.046185967f, 0.0035662893f,
1118 0.03541868f, -0.094149634f, -0.034814864f, 0.003128424f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001119 -0.020674974f, -0.03944324f, -0.008110165f, -0.11113267f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001120 0.08484226f, 0.043586485f, 0.040582247f, 0.0968012f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001121 -0.065249965f, -0.028036479f, 0.0050708856f, 0.0017462453f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001122 0.0326779f, 0.041296225f, 0.09164146f, -0.047743853f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001123 -0.015952192f, -0.034451712f, 0.084197424f, -0.05347844f,
1124 -0.11768019f, 0.085926116f, -0.08251791f, -0.045081906f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001125 0.0948852f, 0.068401024f, 0.024856757f, 0.06978981f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001126 -0.057309967f, -0.012775832f, -0.0032452994f, 0.01977615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001127 -0.041040014f, -0.024264973f, 0.063464895f, 0.05431621f
1128 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001129 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001130 hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
1131 std::vector<float> cellToInputWeightsValue
1132 {
1133 0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
1134 -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
1135 -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f, -0.052169047f,
1136 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f
1137 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001138 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001139 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1140 std::vector<float> cellToForgetWeightsValue
1141 {
1142 -0.01998659f, -0.15568835f, -0.24248174f, -0.012770197f, 0.041331276f,
1143 -0.072311886f, -0.052123554f, -0.0066330447f, -0.043891653f, 0.036225766f,
1144 -0.047248036f, 0.021479502f, 0.033189066f, 0.11952997f, -0.020432774f,
1145 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f
1146 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001147 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001148 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1149 std::vector<float> cellToOutputWeightsValue
1150 {
1151 0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
1152 -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
1153 -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
1154 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f
1155 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001156 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001157 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
1158 std::vector<float> inputGateBiasValue
1159 {
1160 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
1161 -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
1162 -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
1163 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f
1164 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001165 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001166 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1167 std::vector<float> forgetGateBiasValue
1168 {
1169 0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f, 0.11098921f,
1170 0.15378423f, 0.09263801f, 0.09790885f, 0.09508917f, 0.061199076f,
1171 0.07665568f, -0.015443159f, -0.03499149f, 0.046190713f, 0.08895977f,
1172 0.10899629f, 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f
1173 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001174 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001175 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1176 std::vector<float> cellBiasValue
1177 {
1178 -0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f, -0.1483596f,
1179 -0.10639995f, -0.091433935f, 0.058573797f, -0.06809782f, -0.07889636f,
1180 -0.043246906f, -0.09829136f, -0.4279842f, 0.034901652f, 0.18797937f,
1181 0.0075234566f, 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f
1182 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001183 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001184 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1185 std::vector<float> outputGateBiasValue
1186 {
1187 0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
1188 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
1189 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
1190 -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f
1191 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001192 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1193 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001194 hidl_vec<uint32_t> projectionWeightsDimensions{outputSize, numUnits};
1195 std::vector<float> projectionWeightsValue
1196 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001197 -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001198 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001199 -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
1200 -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001201 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
1202 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
1203 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
1204 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001205 -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
1206 -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
1207 -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001208 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
1209 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
1210 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
1211 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
1212 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001213 -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001214 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001215 -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001216 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001217 -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
1218 -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001219 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001220 -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001221 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001222 -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
1223 -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001224 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001225 -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
1226 -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
1227 -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001228 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
1229 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001230 -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001231 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
1232 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
1233 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
1234 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
1235 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001236 -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
1237 -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001238 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001239 -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
1240 -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001241 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
1242 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
1243 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001244 -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
1245 -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
1246 -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001247 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001248 -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001249 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
1250 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001251 -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
1252 -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
1253 -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001254 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001255 -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
1256 -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
1257 -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001258 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
1259 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
1260 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f
1261 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001262 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001263 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
1264 std::vector<float> projectionBiasValue(outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001265
1266 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001267 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1268 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001269 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001270 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1271 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001272
Matteo Martincighc7434122018-11-14 12:27:04 +00001273 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +01001274 // 20: The activation function: A value indicating the activation function:
1275 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +00001276 hidl_vec<uint32_t> activationFunctionDimensions{};
1277 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +01001278 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1279 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001280 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1281 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001282 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1283 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001284 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1285 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001286
1287 // Outputs:
1288 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1289 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +00001290 // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
1291 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1292 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1293 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1294 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
1295 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001296 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001297 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1298 std::vector<float> outputStateOutValue
1299 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001300 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835577f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001301 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415119f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001302 -0.013869f, 0.0287268f, -0.00334694f, 0.00733397f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001303 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.0216801f
1304 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001305 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001306 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1307 std::vector<float> cellStateOutValue
1308 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001309 -0.0531632f, -0.0118138f, 0.0870833f, 0.0347929f, -0.076144f,
1310 -0.0659219f, -0.0463811f, 0.0141307f, -0.0127706f, -0.03782f,
1311 -0.00402401f, -0.00571876f, -0.187957f, -0.0247127f, 0.0711425f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001312 0.008244f, 0.0492649f, 0.126972f, 0.0933097f, 0.29848f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001313 -0.0966178f, -0.114417f, 0.0387229f, 0.0453255f, -0.181286f,
1314 -0.0651251f, -0.0996879f, -0.00276995f, 0.0617558f, -0.0100728f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001315 0.056304f, -0.077416f, -0.162858f, -0.0541251f, 0.0571202f,
1316 -0.0525331f, 0.0724297f, 0.171029f, 0.141738f, 0.295483f
1317 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001318 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1319 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +00001320 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1321 std::vector<float> outputValue
1322 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001323 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001324 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001325 -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001326 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f
1327 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001328
1329 LstmTestImpl(inputDimensions, inputValue,
1330 inputToInputWeightsDimensions, inputToInputWeightsValue,
1331 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1332 inputToCellWeightsDimensions, inputToCellWeightsValue,
1333 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1334 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1335 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1336 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1337 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1338 cellToInputWeightsDimensions, cellToInputWeightsValue,
1339 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1340 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1341 inputGateBiasDimensions, inputGateBiasValue,
1342 forgetGateBiasDimensions, forgetGateBiasValue,
1343 cellBiasDimensions, cellBiasValue,
1344 outputGateBiasDimensions, outputGateBiasValue,
1345 projectionWeightsDimensions, projectionWeightsValue,
1346 projectionBiasDimensions, projectionBiasValue,
1347 outputStateInDimensions, outputStateInValue,
1348 cellStateInDimensions, cellStateInValue,
1349 activationFunctionDimensions, activationFunctionValue,
1350 cellClippingThresholdDimensions, cellClippingThresholdValue,
1351 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1352 scratchBufferDimensions, scratchBufferValue,
1353 outputStateOutDimensions, outputStateOutValue,
1354 cellStateOutDimensions, cellStateOutValue,
Matteo Martincighc7434122018-11-14 12:27:04 +00001355 outputDimensions, outputValue,
1356 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +01001357}
1358
// Replicates the VTS "lstm2" test (CIFG + peephole, no projection layer), extended to a batch
// size of 2, and runs it on the given ArmNN compute backend via LstmTestImpl.
//
// @param compute The ArmNN backend (e.g. CpuRef, GpuAcc) to execute the network on.
void LstmCifgPeepholeNoProjectionBatch2(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
    // The batch size has been increased to 2 (it was 1 in the VTS test) with appropriate input and output values added.

    uint32_t batchSize = 2;
    uint32_t inputSize = 2;
    uint32_t numUnits = 4;
    uint32_t outputSize = numUnits; // No projection layer in this test, so output_size == num_units.

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{2.0f, 3.0f, 3.0f, 4.0f};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    //     Left empty (dimension 0) here: CIFG couples the input gate to the forget gate.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
    std::vector<float> inputToInputWeightsValue;
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
                                                  0.13056988f, -0.36333650f,
                                                 -0.22755712f,  0.28253698f,
                                                  0.24407166f,  0.33826375f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
                                               -0.09624726f,  0.05100781f,
                                                0.04717243f,  0.48944736f,
                                               -0.38535351f, -0.17212132f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
                                                 -0.55932593f, -0.09426838f,
                                                 -0.44257352f,  0.54939759f,
                                                  0.01533556f,  0.42751634f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    //     Empty for the same CIFG reason as operand 01.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
    std::vector<float> recurrentToInputWeightsValue;
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
                                                     -0.14340827f,  0.36986142f,  0.23414481f,  0.55899000f,
                                                      0.10798943f, -0.41174671f,  0.17751795f, -0.34484994f,
                                                     -0.35874045f, -0.11352962f,  0.27268326f,  0.54058349f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
                                                    0.42957711f,  0.01841056f, -0.32764608f, -0.33027974f,
                                                   -0.10826075f,  0.20675004f,  0.19069612f, -0.03026325f,
                                                   -0.54532051f,  0.33003211f,  0.44901288f,  0.21193194f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
                                                     0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
                                                     0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
                                                     0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Empty: the input gate does not exist under CIFG, so it has no peephole connection either.
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
    std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
    std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Empty: no input gate under CIFG.
    hidl_vec<uint32_t> inputGateBiasDimensions{0}; // VTS was {4} -> {0} ?
    std::vector<float> inputGateBiasValue;
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue{1.0f, 1.0f, 1.0f, 1.0f};
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue(numUnits, 0.0f);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue(numUnits, 0.0f);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units]. Empty: this test has no projection layer.
    hidl_vec<uint32_t> projectionWeightsDimensions{0};
    std::vector<float> projectionWeightsValue;
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<float> projectionBiasValue;

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4}; // Tanh
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f};

    // Outputs:
    //  0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //     CIFG, or [batch_size, num_units * 3] without CIFG.
    // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
    // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //           android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //           tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
    std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
    //  1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<float> outputStateOutValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
                                           -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
    //  2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<float> cellStateOutValue{-0.76044439f, -0.01804161f, 0.18226376f, -0.06493707f,
                                         -0.90477051f, -0.04355603f, 0.18475688f, -0.04158677f};
    //  3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
                                   -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};

    // Build, compile and execute the network, then check the three outputs against the
    // expected values above (the scratch buffer is not checked).
    LstmTestImpl(inputDimensions,                       inputValue,
                 inputToInputWeightsDimensions,         inputToInputWeightsValue,
                 inputToForgetWeightsDimensions,        inputToForgetWeightsValue,
                 inputToCellWeightsDimensions,          inputToCellWeightsValue,
                 inputToOutputWeightsDimensions,        inputToOutputWeightsValue,
                 recurrentToInputWeightsDimensions,     recurrentToInputWeightsValue,
                 recurrentToForgetWeightsDimensions,    recurrentToForgetWeightsValue,
                 recurrentToCellWeightsDimensions,      recurrentToCellWeightsValue,
                 recurrentToOutputWeightsDimensions,    recurrentToOutputWeightsValue,
                 cellToInputWeightsDimensions,          cellToInputWeightsValue,
                 cellToForgetWeightsDimensions,         cellToForgetWeightsValue,
                 cellToOutputWeightsDimensions,         cellToOutputWeightsValue,
                 inputGateBiasDimensions,               inputGateBiasValue,
                 forgetGateBiasDimensions,              forgetGateBiasValue,
                 cellBiasDimensions,                    cellBiasValue,
                 outputGateBiasDimensions,              outputGateBiasValue,
                 projectionWeightsDimensions,           projectionWeightsValue,
                 projectionBiasDimensions,              projectionBiasValue,
                 outputStateInDimensions,               outputStateInValue,
                 cellStateInDimensions,                 cellStateInValue,
                 activationFunctionDimensions,          activationFunctionValue,
                 cellClippingThresholdDimensions,       cellClippingThresholdValue,
                 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
                 scratchBufferDimensions,               scratchBufferValue,
                 outputStateOutDimensions,              outputStateOutValue,
                 cellStateOutDimensions,                cellStateOutValue,
                 outputDimensions,                      outputValue,
                 compute);
}
Kevin Mayedc5ffa2019-05-22 12:02:53 +01001529#ifndef ARMCOMPUTECL_ENABLED
1530 static const boost::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
1531#else
1532 static const boost::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
1533#endif
Matteo Martincighc7434122018-11-14 12:27:04 +00001534
// Runs the basic LSTM variant (no CIFG, no peephole, no projection) on each backend
// in COMPUTE_DEVICES; 'sample' is the armnn::Compute value supplied by the data test case.
BOOST_DATA_TEST_CASE(LstmNoCifgNoPeepholeNoProjectionTest, COMPUTE_DEVICES)
{
    LstmNoCifgNoPeepholeNoProjection(sample);
}
1539
// Runs the CIFG + peephole (no projection) LSTM variant on each backend in COMPUTE_DEVICES;
// 'sample' is the armnn::Compute value supplied by the data test case.
BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionTest, COMPUTE_DEVICES)
{
    LstmCifgPeepholeNoProjection(sample);
}
1544
// Runs the peephole + projection (no CIFG) LSTM variant on each backend in COMPUTE_DEVICES;
// 'sample' is the armnn::Compute value supplied by the data test case.
BOOST_DATA_TEST_CASE(LstmNoCifgPeepholeProjectionTest, COMPUTE_DEVICES)
{
    LstmNoCifgPeepholeProjection(sample);
}
1549
// Runs the CIFG + peephole (no projection) LSTM variant with batch size 2 on each backend
// in COMPUTE_DEVICES; 'sample' is the armnn::Compute value supplied by the data test case.
BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionBatch2Test, COMPUTE_DEVICES)
{
    LstmCifgPeepholeNoProjectionBatch2(sample);
}
1554
1555BOOST_AUTO_TEST_SUITE_END()