//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "DriverTestHelpers.hpp"
#include "OperationsUtils.h"

#include <boost/array.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
#include <boost/math/special_functions/relative_difference.hpp>
#include <log/log.h>

#include <cmath>

BOOST_AUTO_TEST_SUITE(LstmTests)

using ArmnnDriver   = armnn_driver::ArmnnDriver;
using DriverOptions = armnn_driver::DriverOptions;
using namespace driverTestHelpers;
using namespace android::hardware;

namespace
{

26template<typename T>
Matteo Martincighc7434122018-11-14 12:27:04 +000027RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
telsoa01ce3e84a2018-08-31 09:31:35 +010028{
29 DataLocation inputInloc = {};
30 inputInloc.poolIndex = poolIndex;
31 inputInloc.offset = 0;
32 inputInloc.length = value.size() * sizeof(T);
33 RequestArgument inputRequestArgument = {};
34 inputRequestArgument.location = inputInloc;
35 inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
36 return inputRequestArgument;
37}
38
39// Returns true if the relative difference between two float values is less than the tolerance value given.
40// This is used because the floating point comparison tolerance (set on each BOOST_AUTO_TEST_CASE) does not work!
41bool TolerantCompareEqual(float a, float b, float tolerance = 0.00001f)
42{
43 float rd;
44 if (a == 0.0f)
45 {
46 rd = fabs(b);
47 }
48 else if (b == 0.0f)
49 {
50 rd = fabs(a);
51 }
52 else
53 {
54 rd = boost::math::relative_difference(a, b);
55 }
56 return rd < tolerance;
57}
58
Kevin Mayf29a2c52019-03-14 11:56:32 +000059// Helper function to create an OperandLifeTime::NO_VALUE for testing.
60// To be used on optional input operands that have no values - these are valid and should be tested.
61OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
62{
63 // Only create a NO_VALUE for optional operands that have no elements
64 if (dimensions.size() == 0 || dimensions[0] == 0)
65 {
66 return OperandLifeTime::NO_VALUE;
67 }
68 return OperandLifeTime::CONSTANT_COPY;
69}
} // anonymous namespace

// Add our own tests here since we fail the lstm tests which Google supplies (because of non-const weights)

// Builds a single-operation V1_0 model containing an LSTM, prepares and runs it
// through the ArmNN driver on the given compute backend, then checks the three
// data outputs (output state out, cell state out, output) against the expected
// values with TolerantCompareEqual. Each (dimensions, value) parameter pair
// describes one LSTM operand; optional operands are passed with empty
// dimensions/values and added with OperandLifeTime::NO_VALUE
// (see CreateNoValueLifeTime). The scratch buffer output is allocated but its
// contents are deliberately not checked.
void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                  const std::vector<float>& inputValue,
                  const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                  const std::vector<float>& inputToInputWeightsValue,
                  const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                  const std::vector<float>& inputToForgetWeightsValue,
                  const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                  const std::vector<float>& inputToCellWeightsValue,
                  const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                  const std::vector<float>& inputToOutputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                  const std::vector<float>& recurrentToInputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                  const std::vector<float>& recurrentToForgetWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                  const std::vector<float>& recurrentToCellWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                  const std::vector<float>& recurrentToOutputWeightsValue,
                  const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
                  const std::vector<float>& cellToInputWeightsValue,
                  const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
                  const std::vector<float>& cellToForgetWeightsValue,
                  const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
                  const std::vector<float>& cellToOutputWeightsValue,
                  const hidl_vec<uint32_t>& inputGateBiasDimensions,
                  const std::vector<float>& inputGateBiasValue,
                  const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                  const std::vector<float>& forgetGateBiasValue,
                  const hidl_vec<uint32_t>& cellBiasDimensions,
                  const std::vector<float>& cellBiasValue,
                  const hidl_vec<uint32_t>& outputGateBiasDimensions,
                  const std::vector<float>& outputGateBiasValue,
                  const hidl_vec<uint32_t>& projectionWeightsDimensions,
                  const std::vector<float>& projectionWeightsValue,
                  const hidl_vec<uint32_t>& projectionBiasDimensions,
                  const std::vector<float>& projectionBiasValue,
                  const hidl_vec<uint32_t>& outputStateInDimensions,
                  const std::vector<float>& outputStateInValue,
                  const hidl_vec<uint32_t>& cellStateInDimensions,
                  const std::vector<float>& cellStateInValue,
                  const hidl_vec<uint32_t>& activationFunctionDimensions,
                  const std::vector<int32_t>& activationFunctionValue,
                  const hidl_vec<uint32_t>& cellClippingThresholdDimensions,
                  const std::vector<float>& cellClippingThresholdValue,
                  const hidl_vec<uint32_t>& projectionClippingThresholdDimensions,
                  const std::vector<float>& projectionClippingThresholdValue,
                  const hidl_vec<uint32_t>& scratchBufferDimensions,
                  const std::vector<float>& scratchBufferValue,
                  const hidl_vec<uint32_t>& outputStateOutDimensions,
                  const std::vector<float>& outputStateOutValue,
                  const hidl_vec<uint32_t>& cellStateOutDimensions,
                  const std::vector<float>& cellStateOutValue,
                  const hidl_vec<uint32_t>& outputDimensions,
                  const std::vector<float>& outputValue,
                  armnn::Compute compute)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
    V1_0::Model model = {};

    // Inputs:
    // The operand ordering below (00..22) must match the NNAPI LSTM spec; the
    // indices are referenced again when wiring model.operations[0].inputs.
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    AddInputOperand(model, inputDimensions);

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    AddTensorOperand(model, inputToInputWeightsDimensions, inputToInputWeightsValue, V1_0::OperandType::TENSOR_FLOAT32,
                     CreateNoValueLifeTime(inputToInputWeightsDimensions));
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    AddTensorOperand(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    AddTensorOperand(model, recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, cellToInputWeightsDimensions, cellToInputWeightsValue,
                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToInputWeightsDimensions));
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToForgetWeightsDimensions));
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToOutputWeightsDimensions));
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, inputGateBiasDimensions, inputGateBiasValue,
                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(inputGateBiasDimensions));
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, forgetGateBiasDimensions, forgetGateBiasValue);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, cellBiasDimensions, cellBiasValue);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, outputGateBiasDimensions, outputGateBiasValue);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    AddTensorOperand(model, projectionWeightsDimensions, projectionWeightsValue,
                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(projectionWeightsDimensions));
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    AddTensorOperand(model, projectionBiasDimensions, projectionBiasValue,
                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(projectionBiasDimensions));

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddInputOperand(model, outputStateInDimensions);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddInputOperand(model, cellStateInDimensions);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    AddTensorOperand(model, activationFunctionDimensions,
                     activationFunctionValue, V1_0::OperandType::INT32);
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    AddTensorOperand(model, cellClippingThresholdDimensions,
                     cellClippingThresholdValue, V1_0::OperandType::FLOAT32);
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    AddTensorOperand(model, projectionClippingThresholdDimensions,
                     projectionClippingThresholdValue, V1_0::OperandType::FLOAT32);

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    AddOutputOperand(model, scratchBufferDimensions);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddOutputOperand(model, outputStateOutDimensions);
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddOutputOperand(model, cellStateOutDimensions);
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    AddOutputOperand(model, outputDimensions);

    // make the lstm operation: operand indices 0-22 are the inputs added above,
    // 23-26 the outputs, in the same order.
    model.operations.resize(1);
    model.operations[0].type = V1_0::OperationType::LSTM;
    model.operations[0].inputs =
        hidl_vec<uint32_t> {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
    model.operations[0].outputs = hidl_vec<uint32_t> {23, 24, 25, 26};

    // define the input values; pool indices 0-2 are filled by AddPoolAndSetData below
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    inputArguments[0] = CreateRequestArgument<float>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<float>(outputStateInValue, 1);
    inputArguments[2] = CreateRequestArgument<float>(cellStateInValue, 2);

    // define the expected output values; pool indices 3-6 are allocated by AddPoolAndGetData below
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(4);

    outputArguments[0] = CreateRequestArgument<float>(scratchBufferValue, 3);
    outputArguments[1] = CreateRequestArgument<float>(outputStateOutValue, 4);
    outputArguments[2] = CreateRequestArgument<float>(cellStateOutValue, 5);
    outputArguments[3] = CreateRequestArgument<float>(outputValue, 6);

    Request request = {};
    request.inputs = inputArguments;
    request.outputs = outputArguments;

    // set the input data (pools 0-2, in the same order as inputArguments)
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(outputStateInValue.size(), request, outputStateInValue.data());
    AddPoolAndSetData(cellStateInValue.size(), request, cellStateInValue.data());

    // add memory for the outputs (pools 3-6); keep pointers so the results can be read back
    AddPoolAndGetData(scratchBufferValue.size(), request);
    android::sp<IMemory> outputStateOutMemory = AddPoolAndGetData(outputStateOutValue.size(), request);
    float* outputStateOutData = static_cast<float*>(static_cast<void*>(outputStateOutMemory->getPointer()));
    android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData(cellStateOutValue.size(), request);
    float* cellStateOutData = static_cast<float*>(static_cast<void*>(cellStateOutMemory->getPointer()));
    android::sp<IMemory> outputMemory = AddPoolAndGetData(outputValue.size(), request);
    float* outputData = static_cast<float*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    // NOTE(review): if preparation fails the Execute step is skipped and the
    // comparisons below run against unmodified output buffers.
    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, *driver);
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }

    // check the results (the scratch buffer output is intentionally not compared)
    for (size_t i = 0; i < outputStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
                   "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
    }
    for (size_t i = 0; i < cellStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]),
                   "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
    }
    for (size_t i = 0; i < outputValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
    }
}
287
// Exercises LstmTestImpl with an LSTM configured WITHOUT CIFG, peephole
// connections or a projection layer: the corresponding optional operands
// (cell-to-* weights, projection weights/bias) are passed empty.
void LstmNoCifgNoPeepholeNoProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).

    uint32_t batchSize = 1;
    uint32_t inputSize = 2;
    uint32_t numUnits = 4;
    uint32_t outputSize = numUnits; // no projection layer, so output_size == num_units

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{2.0f, 3.0f};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToInputWeightsValue{-0.45018822f, -0.02338299f,
                                                -0.08705890f, -0.34550029f,
                                                 0.04266912f, -0.15680569f,
                                                -0.34856534f,  0.43890524f};
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{ 0.09701663f,  0.20334584f,
                                                 -0.50592935f, -0.31343272f,
                                                 -0.40032279f,  0.44781327f,
                                                  0.01387155f, -0.35593212f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.50013041f,  0.13702840f,
                                                0.11810488f,  0.20131630f,
                                               -0.20583314f,  0.44344562f,
                                                0.22077113f, -0.29909778f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{-0.25065863f, -0.28290087f,
                                                  0.04613829f,  0.40525138f,
                                                  0.44272184f,  0.03897077f,
                                                 -0.15568960f,  0.19487578f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToInputWeightsValue{-0.00635350f, -0.20423880f,  0.31454784f, -0.35746509f,
                                                     0.28902304f,  0.08183324f, -0.16555229f,  0.02286911f,
                                                    -0.13566875f,  0.03034258f,  0.48091322f, -0.12528998f,
                                                     0.24077177f, -0.51332325f, -0.33502164f,  0.10629296f};
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.48684245f, -0.06655136f,  0.42224967f,  0.21126390f,
                                                      0.27654213f,  0.20864892f, -0.07646349f,  0.45877004f,
                                                      0.00141793f, -0.14609534f,  0.36447752f,  0.09196436f,
                                                      0.28053468f,  0.01560611f, -0.20127171f, -0.01140004f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{-0.34074140f,  0.24443203f, -0.20785320f,  0.26320225f,
                                                    0.05695659f, -0.00123841f, -0.47447860f, -0.35869038f,
                                                   -0.06418842f, -0.13502428f, -0.50176400f,  0.22830659f,
                                                   -0.46367589f,  0.26016325f, -0.03894562f, -0.16368064f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{ 0.43385774f, -0.17194885f,  0.27182370f,  0.09215671f,
                                                      0.24107647f, -0.39835793f,  0.18212086f,  0.01301402f,
                                                      0.48572797f, -0.50656658f,  0.20047462f, -0.20607421f,
                                                     -0.51818722f, -0.15390486f,  0.04681480f,  0.39922136f};
    // No peephole connections: the cell-to-* weights (09-11) are all empty.
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<float> cellToForgetWeightsValue;
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<float> cellToOutputWeightsValue;
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
    std::vector<float> inputGateBiasValue(numUnits, 0.0f);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue(numUnits, 0.0f);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue(numUnits, 0.0f);
    // No projection layer: projection weights (16) and bias (17) are empty.
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    hidl_vec<uint32_t> projectionWeightsDimensions{0};
    std::vector<float> projectionWeightsValue;
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<float> projectionBiasValue;

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4}; // 4 == Tanh
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f};

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
    // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //           android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //           tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
    std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<float> outputStateOutValue {-0.0297319f, 0.122947f, 0.208851f, -0.153588f};
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<float> cellStateOutValue {-0.145439f, 0.157475f, 0.293663f, -0.277353f};
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<float> outputValue {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f};

    LstmTestImpl(inputDimensions, inputValue,
                 inputToInputWeightsDimensions, inputToInputWeightsValue,
                 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                 inputToCellWeightsDimensions, inputToCellWeightsValue,
                 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                 cellToInputWeightsDimensions, cellToInputWeightsValue,
                 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                 inputGateBiasDimensions, inputGateBiasValue,
                 forgetGateBiasDimensions, forgetGateBiasValue,
                 cellBiasDimensions, cellBiasValue,
                 outputGateBiasDimensions, outputGateBiasValue,
                 projectionWeightsDimensions, projectionWeightsValue,
                 projectionBiasDimensions, projectionBiasValue,
                 outputStateInDimensions, outputStateInValue,
                 cellStateInDimensions, cellStateInValue,
                 activationFunctionDimensions, activationFunctionValue,
                 cellClippingThresholdDimensions, cellClippingThresholdValue,
                 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
                 scratchBufferDimensions, scratchBufferValue,
                 outputStateOutDimensions, outputStateOutValue,
                 cellStateOutDimensions, cellStateOutValue,
                 outputDimensions, outputValue,
                 compute);
}
460
Matteo Martincighc7434122018-11-14 12:27:04 +0000461void LstmCifgPeepholeNoProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +0100462{
463 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
464 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
465 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
466
Matteo Martincighc7434122018-11-14 12:27:04 +0000467 uint32_t batchSize = 1;
468 uint32_t inputSize = 2;
469 uint32_t numUnits = 4;
470 uint32_t outputSize = numUnits;
471
telsoa01ce3e84a2018-08-31 09:31:35 +0100472 // Inputs:
473 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
474 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +0000475 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
476 std::vector<float> inputValue{2.0f, 3.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100477
478 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
479 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +0000480 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
481 std::vector<float> inputToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100482 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
483 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000484 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
485 std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
486 0.13056988f, -0.36333650f,
487 -0.22755712f, 0.28253698f,
488 0.24407166f, 0.33826375f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100489 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000490 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
491 std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
492 -0.09624726f, 0.05100781f,
493 0.04717243f, 0.48944736f,
494 -0.38535351f, -0.17212132f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100495 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
496 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000497 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
498 std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
499 -0.55932593f, -0.09426838f,
500 -0.44257352f, 0.54939759f,
501 0.01533556f, 0.42751634f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100502 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
503 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
504 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +0000505 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
506 std::vector<float> recurrentToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100507 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
508 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000509 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
510 std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
511 -0.14340827f, 0.36986142f, 0.23414481f, 0.55899000f,
512 0.10798943f, -0.41174671f, 0.17751795f, -0.34484994f,
513 -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100514 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
515 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000516 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
517 std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
518 0.42957711f, 0.01841056f, -0.32764608f, -0.33027974f,
519 -0.10826075f, 0.20675004f, 0.19069612f, -0.03026325f,
520 -0.54532051f, 0.33003211f, 0.44901288f, 0.21193194f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100521 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
522 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000523 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
524 std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
525 0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
526 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
527 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100528 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000529 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
530 std::vector<float> cellToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100531 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000532 hidl_vec<uint32_t> cellToForgetWeightsDimensions{4};
533 std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100534 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000535 hidl_vec<uint32_t> cellToOutputWeightsDimensions{4};
536 std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100537 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000538 hidl_vec<uint32_t> inputGateBiasDimensions{0}; // VTS was {4} -> {0} ?
539 std::vector<float> inputGateBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100540 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000541 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
542 std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100543 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000544 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
545 std::vector<float> cellBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100546 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000547 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
548 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100549 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
550 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000551 hidl_vec<uint32_t> projectionWeightsDimensions{0};
552 std::vector<float> projectionWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100553 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000554 hidl_vec<uint32_t> projectionBiasDimensions{0};
555 std::vector<float> projectionBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100556
557 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000558 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
559 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100560 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000561 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
562 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100563
Matteo Martincighc7434122018-11-14 12:27:04 +0000564 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +0100565 // 20: The activation function: A value indicating the activation function:
566 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +0000567 hidl_vec<uint32_t> activationFunctionDimensions{};
568 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +0100569 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
570 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000571 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
572 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100573 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
574 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000575 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
576 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100577
578 // Outputs:
579 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
580 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +0000581 // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
582 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
583 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
584 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
585 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
586 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100587 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000588 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
589 std::vector<float> outputStateOutValue{-0.364445f, -0.00352185f, 0.128866f, -0.0516365f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100590 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000591 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
592 std::vector<float> cellStateOutValue{-0.760444f, -0.0180416f, 0.182264f, -0.0649371f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100593 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
594 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +0000595 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
596 std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100597
598 LstmTestImpl(inputDimensions, inputValue,
599 inputToInputWeightsDimensions, inputToInputWeightsValue,
600 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
601 inputToCellWeightsDimensions, inputToCellWeightsValue,
602 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
603 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
604 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
605 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
606 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
607 cellToInputWeightsDimensions, cellToInputWeightsValue,
608 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
609 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
610 inputGateBiasDimensions, inputGateBiasValue,
611 forgetGateBiasDimensions, forgetGateBiasValue,
612 cellBiasDimensions, cellBiasValue,
613 outputGateBiasDimensions, outputGateBiasValue,
614 projectionWeightsDimensions, projectionWeightsValue,
615 projectionBiasDimensions, projectionBiasValue,
616 outputStateInDimensions, outputStateInValue,
617 cellStateInDimensions, cellStateInValue,
618 activationFunctionDimensions, activationFunctionValue,
619 cellClippingThresholdDimensions, cellClippingThresholdValue,
620 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
621 scratchBufferDimensions, scratchBufferValue,
622 outputStateOutDimensions, outputStateOutValue,
623 cellStateOutDimensions, cellStateOutValue,
Matteo Martincighc7434122018-11-14 12:27:04 +0000624 outputDimensions, outputValue,
625 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +0100626}
627
Matteo Martincighc7434122018-11-14 12:27:04 +0000628void LstmNoCifgPeepholeProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +0100629{
630 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm3.model.cpp
631 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm3.example.cpp
632 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
633
Matteo Martincighc7434122018-11-14 12:27:04 +0000634 uint32_t batchSize = 2;
635 uint32_t inputSize = 5;
636 uint32_t numUnits = 20;
637 uint32_t outputSize = 16;
638
telsoa01ce3e84a2018-08-31 09:31:35 +0100639 // Inputs:
640 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
641 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +0000642 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
643 std::vector<float> inputValue{0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
644 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100645
646 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
647 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +0000648 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
649 std::vector<float> inputToInputWeightsValue
650 {
651 0.0213936830f, 0.0612455100f, 0.0469051670f, -0.0146576770f, -0.0314946300f,
652 0.0917180300f, 0.1464780100f, 0.1079719300f, -0.0057968358f, 0.0019193048f,
653 -0.2726754000f, 0.1015402900f, -0.0185398850f, 0.0803498850f, -0.1026238500f,
654 -0.0225997870f, -0.0912115500f, -0.0086759670f, -0.0452061030f, -0.0821282000f,
655 -0.0080459520f, 0.0154780810f, 0.0552172470f, 0.0387195870f, 0.0441536270f,
656 -0.0645324300f, 0.0503182500f, -0.0469351080f, -0.0081644309f, 0.0145742260f,
657 -0.1671009000f, -0.1551955200f, -0.1681979700f, -0.1397126900f, -0.1195305900f,
658 0.2500548700f, -0.2279098300f, 0.0098550870f, -0.0281409580f, -0.1120069800f,
659 0.1129540800f, -0.0035217577f, 0.0544850750f, 0.0518469500f, 0.0647112060f,
660 0.1098919300f, 0.1167478600f, 0.0349060700f, 0.0772735700f, 0.1139058500f,
661 -0.1863375000f, -0.1034451000f, -0.1394518900f, -0.0494012270f, -0.1876706300f,
662 0.0424839030f, 0.1423355200f, 0.1383258100f, 0.1835016500f, 0.1454560300f,
663 -0.0285457040f, 0.0249395310f, 0.0509297180f, 0.0076203286f, -0.0029723682f,
664 -0.0424842240f, -0.1182759600f, -0.0917110400f, -0.1080862800f, -0.1632798800f,
665 -0.2273378000f, -0.0993647000f, -0.0171551070f, 0.0023917493f, 0.0492727640f,
666 0.0038534778f, 0.0547645050f, 0.0897537840f, 0.0694723400f, 0.0801447600f,
667 -0.0454423400f, -0.0497073000f, -0.0713563100f, -0.0489291060f, -0.0040420120f,
668 -0.0092840260f, 0.0180420540f, 0.0036860977f, -0.0742730200f, -0.1143460400f,
669 -0.0189954560f, 0.0314875430f, 0.0128349080f, 0.0199777540f, 0.0442566540f,
670 -0.3929261300f, -0.1851933400f, -0.1165128100f, -0.0680989200f, 0.0113736770f
671 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100672 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
673 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000674 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
675 std::vector<float> inputToForgetWeightsValue
676 {
677 -0.0018401089f, -0.0048522370f, 0.0369842400f, 0.0141817040f, 0.0282732360f,
678 -0.0167261940f, -0.0524975900f, -0.1020426100f, 0.0086106600f, -0.0409795050f,
679 -0.0098991870f, 0.0192389200f, -0.0281772690f, -0.0853510300f, -0.1458549500f,
680 0.1066256700f, -0.0190973100f, -0.0178835340f, -0.0047269356f, -0.0451033230f,
681 0.0030784295f, 0.0767847750f, 0.0746369600f, 0.0945313950f, 0.0814421000f,
682 -0.1225789900f, -0.0339457580f, -0.0313034650f, 0.0456306260f, 0.0684388700f,
683 -0.1349294500f, -0.0124800070f, -0.0811829000f, -0.0722449900f, -0.0962879100f,
684 0.0451009460f, 0.0012300825f, 0.0139646620f, 0.0993723940f, 0.0254305900f,
685 0.0695832400f, 0.0342572960f, 0.0482646000f, 0.0626799700f, 0.0526250680f,
686 0.1278466600f, 0.0707789700f, 0.0257259350f, 0.0416500900f, 0.0724190500f,
687 0.0186686440f, -0.0373772940f, -0.0627778300f, -0.0883363600f, -0.0401206050f,
688 -0.0114055860f, -0.0078083350f, -0.0103013860f, -0.0051021670f, 0.0277174640f,
689 0.0548342300f, 0.1144911100f, 0.1128965200f, 0.1093983900f, 0.1339650600f,
690 -0.0840216600f, -0.0190146200f, -0.0446783040f, -0.0772056500f, 0.0143500630f,
691 -0.1175795800f, -0.0652038000f, -0.0818573300f, -0.0767543240f, -0.0926143750f,
692 0.1040549100f, 0.0529603360f, 0.0357558950f, 0.0358393860f, -0.0125405530f,
693 0.0368812980f, 0.0291337600f, 0.0342015900f, 0.0544844700f, -0.0545233530f,
694 0.0258271500f, 0.0232735500f, -0.0118571790f, -0.0011980024f, -0.0346417170f,
695 -0.0261250940f, -0.1758261500f, -0.1592365700f, -0.2748677400f, -0.0006143371f,
696 0.0001771948f, -8.470171e-05f, 0.0265180700f, 0.0457907650f, 0.069564960f
697 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100698 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000699 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
700 std::vector<float> inputToCellWeightsValue
701 {
702 -0.0458028300f, -0.0954946200f, -0.0324189850f, -0.0645463300f, -0.0435284530f,
703 0.0430185870f, -0.0491523440f, -0.1241814400f, -0.0789854750f, -0.0759688900f,
704 0.0194843620f, -0.1143496200f, -0.0074034138f, -0.0631484400f, -0.0929814950f,
705 0.0062155537f, -0.0250343380f, -0.0028890965f, 0.0489295270f, 0.0623507500f,
706 0.1066591800f, -0.0320367920f, -0.0850591600f, -0.1084335800f, -0.1300243300f,
707 -0.0368164370f, -0.0213013400f, -0.0165182390f, 0.0047691227f, -0.0025825808f,
708 0.0660178660f, 0.0299915340f, -0.1065283600f, -0.1037554000f, -0.1305607100f,
709 -0.0326664300f, -0.0337024140f, -0.0064734240f, -0.0461169200f, 0.0144193390f,
710 -0.0251743230f, 0.0396852000f, 0.0817775060f, 0.0615746800f, 0.1021009500f,
711 -0.0096581940f, 0.0465117170f, 0.0360390600f, 0.0069369148f, 0.0159600950f,
712 -0.0650766600f, 0.0955159800f, 0.0535688360f, 0.0640871400f, 0.1283566700f,
713 -0.0087143290f, -0.2021196600f, -0.1209367400f, 0.0294504720f, 0.2849013000f,
714 -0.0292279010f, 0.1164364000f, -0.0856026300f, 0.0994178600f, -0.0369995650f,
715 -0.0288426260f, -0.0033637602f, -0.0170129020f, -0.0972086500f, -0.1119335100f,
716 -0.0291551170f, -0.0179360340f, -0.0097689360f, -0.0422332400f, -0.0361596350f,
717 0.0650511200f, -0.0217428920f, -0.0233772120f, -0.0722136400f, -0.0643055200f,
718 0.0545386500f, 0.0911498140f, 0.0638733100f, 0.0075183930f, 0.0559609530f,
719 0.0697793440f, 0.0464111680f, 0.1050991100f, 0.0746389400f, 0.0075130584f,
720 0.0128509820f, 0.0455543100f, 0.0569556880f, 0.0655528500f, 0.0508014560f,
721 -0.0098626830f, 0.0082677200f, -0.0265556090f, -0.0073611983f, -0.0014897042f
722 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100723 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
724 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000725 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
726 std::vector<float> inputToOutputWeightsValue
727 {
728 -0.0998932000f, -0.0720195600f, -0.0528037730f, -0.1562959300f, -0.1500191800f,
729 -0.0765075100f, 0.0235985500f, -0.0751553550f, -0.0803770900f, -0.1509353400f,
730 0.0295175520f, -0.0475139300f, 0.0103505310f, -0.0266485100f, -0.0168397220f,
731 -0.0231211630f, 0.0077019283f, 0.0128512570f, -0.0504064900f, -0.0129761000f,
732 -0.0217377470f, -0.0383057930f, -0.0687058600f, -0.0148124700f, -0.0012853940f,
733 0.1012423600f, 0.0831228350f, 0.0533130060f, -0.0622356460f, -0.0756371540f,
734 -0.0278339030f, 0.0297749710f, 0.1130802000f, 0.0921890600f, 0.0950613500f,
735 -0.0866657640f, -0.0371627060f, -0.0388809140f, -0.0358328450f, -0.0144815640f,
736 -0.0982500300f, -0.1204856900f, -0.0976655860f, -0.0528763300f, -0.0964047000f,
737 -0.1136642900f, 0.0357775050f, 0.1356881900f, 0.0524513830f, 0.0506493040f,
738 0.0579895100f, -0.0218523350f, -0.0998488440f, 0.0147404750f, -0.0788979460f,
739 0.0497469900f, 0.0141604730f, 0.0697393200f, 0.0496494200f, 0.0333646460f,
740 0.0819012400f, 0.0255353670f, 0.0508931650f, 0.0485142540f, 0.0694581300f,
741 -0.0789075640f, -0.0670761600f, -0.1184450800f, -0.0998668800f, -0.0750940300f,
742 0.0626322600f, 0.1492558700f, 0.2018843600f, 0.1209845100f, 0.1463941500f,
743 0.0015017595f, -0.0142673820f, -0.0341725700f, 0.0127114680f, 0.0028300495f,
744 -0.0247584820f, -0.0509854800f, -0.0821182000f, 0.0142256720f, 0.0215441580f,
745 0.0894972500f, 0.0750526800f, -0.0020780868f, 0.0490825800f, 0.0647629500f,
746 -0.0229070630f, 0.0275624560f, 0.0401857350f, 0.0195675770f, -0.0155987390f,
747 -0.0490973030f, -0.0171218660f, -0.0833682340f, -0.0233200200f, -0.084095600f
748 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100749 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
750 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
751 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +0000752 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
753 std::vector<float> recurrentToInputWeightsValue
754 {
telsoa01ce3e84a2018-08-31 09:31:35 +0100755 -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f, // 00
756 -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
757 -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
758 -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000759 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f, // 01
760 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100761 -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000762 0.14283475f, -0.07390571f, -0.06402044f, 0.062524505f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100763 -0.093129106f, 0.04860203f, -0.08364217f, -0.08119002f, // 02
Matteo Martincighc7434122018-11-14 12:27:04 +0000764 0.009352075f, 0.22920375f, 0.0016303885f, 0.11583097f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100765 -0.13732095f, 0.012405723f, -0.07551853f, 0.06343048f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000766 0.12162708f, -0.031923793f, -0.014335606f, 0.01790974f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100767 -0.10650317f, -0.0724401f, 0.08554849f, -0.05727212f, // 03
Matteo Martincighc7434122018-11-14 12:27:04 +0000768 0.06556731f, -0.042729504f, -0.043227166f, 0.011683251f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100769 -0.013082158f, -0.029302018f, -0.010899579f, -0.062036745f,
770 -0.022509435f, -0.00964907f, -0.01567329f, 0.04260106f,
771 -0.07787477f, -0.11576462f, 0.017356863f, 0.048673786f, // 04
772 -0.017577527f, -0.05527947f, -0.082487635f, -0.040137455f,
773 -0.10820036f, -0.04666372f, 0.022746278f, -0.07851417f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000774 0.01068115f, 0.032956902f, 0.022433773f, 0.0026891115f,
775 0.08944216f, -0.0685835f, 0.010513544f, 0.07228705f, // 05
776 0.02032331f, -0.059686817f, -0.0005566496f, -0.086984694f,
777 0.040414046f, -0.1380399f, 0.094208956f, -0.05722982f,
778 0.012092817f, -0.04989123f, -0.086576f, -0.003399834f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100779 -0.04696032f, -0.045747425f, 0.10091314f, 0.048676282f, // 06
780 -0.029037097f, 0.031399418f, -0.0040285117f, 0.047237843f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000781 0.09504992f, 0.041799378f, -0.049185462f, -0.031518843f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100782 -0.10516937f, 0.026374253f, 0.10058866f, -0.0033195973f,
783 -0.041975245f, 0.0073591834f, 0.0033782164f, -0.004325073f, // 07
784 -0.10167381f, 0.042500053f, -0.01447153f, 0.06464186f,
785 -0.017142897f, 0.03312627f, 0.009205989f, 0.024138335f,
786 -0.011337001f, 0.035530265f, -0.010912711f, 0.0706555f,
787 -0.005894094f, 0.051841937f, -0.1401738f, -0.02351249f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +0000788 0.0365468f, 0.07590991f, 0.08838724f, 0.021681072f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100789 -0.10086113f, 0.019608743f, -0.06195883f, 0.077335775f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000790 0.023646897f, -0.095322326f, 0.02233014f, 0.09756986f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100791 -0.048691444f, -0.009579111f, 0.07595467f, 0.11480546f, // 09
792 -0.09801813f, 0.019894179f, 0.08502348f, 0.004032281f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000793 0.037211012f, 0.068537936f, -0.048005626f, -0.091520436f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100794 -0.028379958f, -0.01556313f, 0.06554592f, -0.045599163f,
795 -0.01672207f, -0.020169014f, -0.011877351f, -0.20212261f, // 10
Matteo Martincighc7434122018-11-14 12:27:04 +0000796 0.010889619f, 0.0047078193f, 0.038385306f, 0.08540671f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100797 -0.017140968f, -0.0035865551f, 0.016678626f, 0.005633034f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000798 0.015963363f, 0.00871737f, 0.060130805f, 0.028611384f,
799 0.10109069f, -0.015060172f, -0.07894427f, 0.06401885f, // 11
800 0.011584063f, -0.024466386f, 0.0047652307f, -0.09041358f,
801 0.030737216f, -0.0046374933f, 0.14215417f, -0.11823516f,
802 0.019899689f, 0.006106124f, -0.027092824f, 0.0786356f,
803 0.05052217f, -0.058925f, -0.011402121f, -0.024987547f, // 12
telsoa01ce3e84a2018-08-31 09:31:35 +0100804 -0.0013661642f, -0.06832946f, -0.015667673f, -0.1083353f,
805 -0.00096863037f, -0.06988685f, -0.053350925f, -0.027275559f,
806 -0.033664223f, -0.07978348f, -0.025200296f, -0.017207067f,
807 -0.058403496f, -0.055697463f, 0.005798788f, 0.12965427f, // 13
808 -0.062582195f, 0.0013350133f, -0.10482091f, 0.0379771f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000809 0.072521195f, -0.0029455067f, -0.13797039f, -0.03628521f,
810 0.013806405f, -0.017858358f, -0.01008298f, -0.07700066f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100811 -0.017081132f, 0.019358726f, 0.0027079724f, 0.004635139f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +0000812 0.062634714f, -0.02338735f, -0.039547626f, -0.02050681f,
813 0.03385117f, -0.083611414f, 0.002862572f, -0.09421313f,
814 0.058618143f, -0.08598433f, 0.00972939f, 0.023867095f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100815 -0.053934585f, -0.023203006f, 0.07452513f, -0.048767887f, // 15
816 -0.07314807f, -0.056307215f, -0.10433547f, -0.06440842f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000817 0.04328182f, 0.04389765f, -0.020006588f, -0.09076438f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100818 -0.11652589f, -0.021705797f, 0.03345259f, -0.010329105f,
819 -0.025767034f, 0.013057034f, -0.07316461f, -0.10145612f, // 16
Matteo Martincighc7434122018-11-14 12:27:04 +0000820 0.06358255f, 0.18531723f, 0.07759293f, 0.12006465f,
821 0.1305557f, 0.058638252f, -0.03393652f, 0.09622831f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100822 -0.16253184f, -2.4580743e-06f, 0.079869635f, -0.070196845f,
823 -0.005644518f, 0.06857898f, -0.12598175f, -0.035084512f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +0000824 0.03156317f, -0.12794146f, -0.031963028f, 0.04692781f,
825 0.030070418f, 0.0071660685f, -0.095516115f, -0.004643372f,
826 0.040170413f, -0.062104587f, -0.0037324072f, 0.0554317f,
827 0.08184801f, -0.019164372f, 0.06791302f, 0.034257166f, // 18
telsoa01ce3e84a2018-08-31 09:31:35 +0100828 -0.10307039f, 0.021943003f, 0.046745934f, 0.0790918f,
829 -0.0265588f, -0.007824208f, 0.042546265f, -0.00977924f,
830 -0.0002440307f, -0.017384544f, -0.017990116f, 0.12252321f,
831 -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f, // 19
Matteo Martincighc7434122018-11-14 12:27:04 +0000832 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100833 -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000834 -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f
835 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100836 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
837 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000838 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
839 std::vector<float> recurrentToForgetWeightsValue
840 {
telsoa01ce3e84a2018-08-31 09:31:35 +0100841 -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f, // 00
Matteo Martincighc7434122018-11-14 12:27:04 +0000842 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100843 -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000844 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
845 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f, // 01
telsoa01ce3e84a2018-08-31 09:31:35 +0100846 -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
847 -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000848 0.061878487f, -0.04729229f, 0.034919553f, -0.07585433f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100849 -0.04421272f, -0.044019096f, 0.085488975f, 0.04058006f, // 02
850 -0.06890133f, -0.030951202f, -0.024628663f, -0.07672815f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000851 0.034293607f, 0.08556707f, -0.05293577f, -0.033561368f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100852 -0.04899627f, 0.0241671f, 0.015736353f, -0.095442444f,
853 -0.029564252f, 0.016493602f, -0.035026584f, 0.022337519f, // 03
854 -0.026871363f, 0.004780428f, 0.0077918363f, -0.03601621f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000855 0.016435321f, -0.03263031f, -0.09543275f, -0.047392778f,
856 0.013454138f, 0.028934088f, 0.01685226f, -0.086110644f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100857 -0.046250615f, -0.01847454f, 0.047608484f, 0.07339695f, // 04
Matteo Martincighc7434122018-11-14 12:27:04 +0000858 0.034546845f, -0.04881143f, 0.009128804f, -0.08802852f,
859 0.03761666f, 0.008096139f, -0.014454086f, 0.014361001f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100860 -0.023502491f, -0.0011840804f, -0.07607001f, 0.001856849f,
861 -0.06509276f, -0.006021153f, -0.08570962f, -0.1451793f, // 05
Matteo Martincighc7434122018-11-14 12:27:04 +0000862 0.060212336f, 0.055259194f, 0.06974018f, 0.049454916f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100863 -0.027794661f, -0.08077226f, -0.016179763f, 0.1169753f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000864 0.17213494f, -0.0056326236f, -0.053934924f, -0.0124349f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100865 -0.11520337f, 0.05409887f, 0.088759385f, 0.0019655675f, // 06
Matteo Martincighc7434122018-11-14 12:27:04 +0000866 0.0042065294f, 0.03881498f, 0.019844765f, 0.041858196f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100867 -0.05695512f, 0.047233116f, 0.038937137f, -0.06542224f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000868 0.014429736f, -0.09719407f, 0.13908425f, -0.05379757f,
869 0.012321099f, 0.082840554f, -0.029899208f, 0.044217527f, // 07
870 0.059855383f, 0.07711018f, -0.045319796f, 0.0948846f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100871 -0.011724666f, -0.0033288454f, -0.033542685f, -0.04764985f,
872 -0.13873616f, 0.040668588f, 0.034832682f, -0.015319203f,
873 -0.018715994f, 0.046002675f, 0.0599172f, -0.043107376f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +0000874 0.0294216f, -0.002314414f, -0.022424703f, 0.0030315618f,
875 0.0014641669f, 0.0029166266f, -0.11878115f, 0.013738511f,
876 0.12375372f, -0.0006038222f, 0.029104086f, 0.087442465f,
877 0.052958444f, 0.07558703f, 0.04817258f, 0.044462286f, // 09
telsoa01ce3e84a2018-08-31 09:31:35 +0100878 -0.015213451f, -0.08783778f, -0.0561384f, -0.003008196f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000879 0.047060397f, -0.002058388f, 0.03429439f, -0.018839769f,
880 0.024734668f, 0.024614193f, -0.042046934f, 0.09597743f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100881 -0.0043254104f, 0.04320769f, 0.0064070094f, -0.0019131786f, // 10
882 -0.02558259f, -0.022822596f, -0.023273505f, -0.02464396f,
883 -0.10991725f, -0.006240552f, 0.0074488563f, 0.024044557f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000884 0.04383914f, -0.046476185f, 0.028658995f, 0.060410924f,
885 0.050786525f, 0.009452605f, -0.0073054377f, -0.024810238f, // 11
886 0.0052906186f, 0.0066939713f, -0.0020913032f, 0.014515517f,
887 0.015898481f, 0.021362653f, -0.030262267f, 0.016587038f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100888 -0.011442813f, 0.041154444f, -0.007631438f, -0.03423484f,
889 -0.010977775f, 0.036152758f, 0.0066366293f, 0.11915515f, // 12
Matteo Martincighc7434122018-11-14 12:27:04 +0000890 0.02318443f, -0.041350313f, 0.021485701f, -0.10906167f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100891 -0.028218046f, -0.00954771f, 0.020531068f, -0.11995105f,
892 -0.03672871f, 0.024019798f, 0.014255957f, -0.05221243f,
893 -0.00661567f, -0.04630967f, 0.033188973f, 0.10107534f, // 13
894 -0.014027541f, 0.030796422f, -0.10270911f, -0.035999842f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000895 0.15443139f, 0.07684145f, 0.036571592f, -0.035900835f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100896 -0.0034699554f, 0.06209149f, 0.015920248f, -0.031122351f,
897 -0.03858649f, 0.01849943f, 0.13872518f, 0.01503974f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +0000898 0.069941424f, -0.06948533f, -0.0088794185f, 0.061282158f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100899 -0.047401894f, 0.03100163f, -0.041533746f, -0.10430945f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000900 0.044574402f, -0.01425562f, -0.024290353f, 0.034563623f,
901 0.05866852f, 0.023947537f, -0.09445152f, 0.035450947f, // 15
902 0.02247216f, -0.0042998926f, 0.061146557f, -0.10250651f,
903 0.020881841f, -0.06747029f, 0.10062043f, -0.0023941975f,
904 0.03532124f, -0.016341697f, 0.09685456f, -0.016764693f,
905 0.051808182f, 0.05875331f, -0.04536488f, 0.001626336f, // 16
telsoa01ce3e84a2018-08-31 09:31:35 +0100906 -0.028892258f, -0.01048663f, -0.009793449f, -0.017093895f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000907 0.010987891f, 0.02357273f, -0.00010856845f, 0.0099760275f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100908 -0.001845119f, -0.03551521f, 0.0018358806f, 0.05763657f,
909 -0.01769146f, 0.040995963f, 0.02235177f, -0.060430344f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +0000910 0.11475477f, -0.023854522f, 0.10071741f, 0.0686208f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100911 -0.014250481f, 0.034261297f, 0.047418304f, 0.08562733f,
912 -0.030519066f, 0.0060542435f, 0.014653856f, -0.038836084f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000913 0.04096551f, 0.032249358f, -0.08355519f, -0.026823482f, // 18
914 0.056386515f, -0.010401743f, -0.028396193f, 0.08507674f,
915 0.014410365f, 0.020995233f, 0.17040324f, 0.11511526f,
916 0.02459721f, 0.0066619175f, 0.025853224f, -0.023133837f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100917 -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f, // 19
918 -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000919 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f,
920 -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f
921 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100922 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
923 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000924 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
925 std::vector<float> recurrentToCellWeightsValue
926 {
telsoa01ce3e84a2018-08-31 09:31:35 +0100927 -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000928 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
929 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100930 -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000931 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
932 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100933 -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
934 -0.019443132f, -0.030755889f, -0.0040000007f, 0.04465846f,
935 -0.021585021f, 0.0031670958f, 0.0053199246f, -0.056117613f,
936 -0.10893326f, 0.076739706f, -0.08509834f, -0.027997585f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000937 0.037871376f, 0.01449768f, -0.09002357f, -0.06111149f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100938 -0.046195522f, 0.0422062f, -0.005683705f, -0.1253618f,
939 -0.012925729f, -0.04890792f, 0.06985068f, 0.037654128f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000940 0.03398274f, -0.004781977f, 0.007032333f, -0.031787455f,
941 0.010868644f, -0.031489216f, 0.09525667f, 0.013939797f,
942 0.0058680447f, 0.0167067f, 0.02668468f, -0.04797466f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100943 -0.048885044f, -0.12722108f, 0.035304096f, 0.06554885f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000944 0.00972396f, -0.039238118f, -0.05159735f, -0.11329045f,
945 0.1613692f, -0.03750952f, 0.06529313f, -0.071974665f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100946 -0.11769596f, 0.015524369f, -0.0013754242f, -0.12446318f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000947 0.02786344f, -0.014179351f, 0.005264273f, 0.14376344f,
948 0.015983658f, 0.03406988f, -0.06939408f, 0.040699873f,
949 0.02111075f, 0.09669095f, 0.041345075f, -0.08316494f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100950 -0.07684199f, -0.045768797f, 0.032298047f, -0.041805092f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000951 0.0119405f, 0.0061010392f, 0.12652606f, 0.0064572375f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100952 -0.024950314f, 0.11574242f, 0.04508852f, -0.04335324f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000953 0.06760663f, -0.027437469f, 0.07216407f, 0.06977076f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100954 -0.05438599f, 0.034033038f, -0.028602652f, 0.05346137f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000955 0.043184172f, -0.037189785f, 0.10420091f, 0.00882477f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100956 -0.054019816f, -0.074273005f, -0.030617684f, -0.0028467078f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000957 0.024302477f, -0.0038869337f, 0.005332455f, 0.0013399826f,
958 0.04361412f, -0.007001822f, 0.09631092f, -0.06702025f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100959 -0.042049985f, -0.035070654f, -0.04103342f, -0.10273396f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000960 0.0544271f, 0.037184782f, -0.13150354f, -0.0058036847f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100961 -0.008264958f, 0.042035464f, 0.05891794f, 0.029673764f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000962 0.0063542654f, 0.044788733f, 0.054816857f, 0.062257513f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100963 -0.00093483756f, 0.048938446f, -0.004952862f, -0.007730018f,
964 -0.04043371f, -0.017094059f, 0.07229206f, -0.023670016f,
965 -0.052195564f, -0.025616996f, -0.01520939f, 0.045104615f,
966 -0.007376126f, 0.003533447f, 0.006570588f, 0.056037236f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000967 0.12436656f, 0.051817212f, 0.028532185f, -0.08686856f,
968 0.11868599f, 0.07663395f, -0.07323171f, 0.03463402f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100969 -0.050708205f, -0.04458982f, -0.11590894f, 0.021273347f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000970 0.1251325f, -0.15313013f, -0.12224372f, 0.17228661f,
971 0.023029093f, 0.086124025f, 0.006445803f, -0.03496501f,
972 0.028332196f, 0.04449512f, -0.042436164f, -0.026587414f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100973 -0.006041347f, -0.09292539f, -0.05678812f, 0.03897832f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000974 0.09465633f, 0.008115513f, -0.02171956f, 0.08304309f,
975 0.071401566f, 0.019622514f, 0.032163795f, -0.004167056f,
976 0.02295182f, 0.030739572f, 0.056506045f, 0.004612461f,
977 0.06524936f, 0.059999723f, 0.046395954f, -0.0045512207f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100978 -0.1335546f, -0.030136576f, 0.11584653f, -0.014678886f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000979 0.0020118146f, -0.09688814f, -0.0790206f, 0.039770417f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100980 -0.0329582f, 0.07922767f, 0.029322514f, 0.026405897f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000981 0.04207835f, -0.07073373f, 0.063781224f, 0.0859677f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100982 -0.10925287f, -0.07011058f, 0.048005477f, 0.03438226f,
983 -0.09606514f, -0.006669445f, -0.043381985f, 0.04240257f,
984 -0.06955775f, -0.06769346f, 0.043903265f, -0.026784198f,
985 -0.017840602f, 0.024307009f, -0.040079936f, -0.019946516f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000986 0.045318738f, -0.12233574f, 0.026170589f, 0.0074471775f,
987 0.15978073f, 0.10185836f, 0.10298046f, -0.015476589f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100988 -0.039390966f, -0.072174534f, 0.0739445f, -0.1211869f,
989 -0.0347889f, -0.07943156f, 0.014809798f, -0.12412325f,
990 -0.0030663363f, 0.039695457f, 0.0647603f, -0.08291318f,
991 -0.018529687f, -0.004423833f, 0.0037507233f, 0.084633216f,
992 -0.01514876f, -0.056505352f, -0.012800942f, -0.06994386f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000993 0.012962922f, -0.031234352f, 0.07029052f, 0.016418684f,
994 0.03618972f, 0.055686004f, -0.08663945f, -0.017404709f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100995 -0.054761406f, 0.029065743f, 0.052404847f, 0.020238016f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000996 0.0048197987f, -0.0214882f, 0.07078733f, 0.013016777f,
997 0.06262858f, 0.009184685f, 0.020785125f, -0.043904778f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100998 -0.0270329f, -0.03299152f, -0.060088247f, -0.015162964f,
999 -0.001828936f, 0.12642565f, -0.056757294f, 0.013586685f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001000 0.09232601f, -0.035886683f, 0.06000002f, 0.05229691f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001001 -0.052580316f, -0.082029596f, -0.010794592f, 0.012947712f,
1002 -0.036429964f, -0.085508935f, -0.13127148f, -0.017744139f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001003 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001004 -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f,
1005 -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001006 -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f
1007 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001008 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1009 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001010 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1011 std::vector<float> recurrentToOutputWeightsValue
1012 {
1013 0.025825322f, -0.05813119f, 0.09495884f, -0.045984812f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001014 -0.01255415f, -0.0026479573f, -0.08196161f, -0.054914974f,
1015 -0.0046604523f, -0.029587349f, -0.044576716f, -0.07480124f,
1016 -0.082868785f, 0.023254942f, 0.027502948f, -0.0039728214f,
1017 -0.08683098f, -0.08116779f, -0.014675607f, -0.037924774f,
1018 -0.023314456f, -0.007401714f, -0.09255757f, 0.029460307f,
1019 -0.08829125f, -0.005139627f, -0.08989442f, -0.0555066f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001020 0.13596267f, -0.025062224f, -0.048351806f, -0.03850004f,
1021 0.07266485f, -0.022414139f, 0.05940088f, 0.075114764f,
1022 0.09597592f, -0.010211725f, -0.0049794707f, -0.011523867f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001023 -0.025980417f, 0.072999895f, 0.11091378f, -0.081685916f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001024 0.014416728f, 0.043229222f, 0.034178585f, -0.07530371f,
1025 0.035837382f, -0.085607f, -0.007721233f, -0.03287832f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001026 -0.043848954f, -0.06404588f, -0.06632928f, -0.073643476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001027 0.008214239f, -0.045984086f, 0.039764922f, 0.03474462f,
1028 0.060612556f, -0.080590084f, 0.049127717f, 0.04151091f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001029 -0.030063879f, 0.008801774f, -0.023021035f, -0.019558564f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001030 0.05158114f, -0.010947698f, -0.011825728f, 0.0075720972f,
1031 0.0699727f, -0.0039981045f, 0.069350146f, 0.08799282f,
1032 0.016156472f, 0.035502106f, 0.11695009f, 0.006217345f,
1033 0.13392477f, -0.037875112f, 0.025745004f, 0.08940699f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001034 -0.00924166f, 0.0046702605f, -0.036598757f, -0.08811812f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001035 0.10522024f, -0.032441203f, 0.008176899f, -0.04454919f,
1036 0.07058152f, 0.0067963637f, 0.039206743f, 0.03259838f,
1037 0.03725492f, -0.09515802f, 0.013326398f, -0.052055415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001038 -0.025676316f, 0.03198509f, -0.015951829f, -0.058556724f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001039 0.036879618f, 0.043357447f, 0.028362012f, -0.05908629f,
1040 0.0059240665f, -0.04995891f, -0.019187413f, 0.0276265f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001041 -0.01628143f, 0.0025863599f, 0.08800015f, 0.035250366f,
1042 -0.022165963f, -0.07328642f, -0.009415526f, -0.07455109f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001043 0.11690406f, 0.0363299f, 0.07411125f, 0.042103454f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001044 -0.009660886f, 0.019076364f, 0.018299393f, -0.046004917f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001045 0.08891175f, 0.0431396f, -0.026327137f, -0.051502608f,
1046 0.08979574f, -0.051670972f, 0.04940282f, -0.07491107f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001047 -0.021240504f, 0.022596184f, -0.034280192f, 0.060163025f,
1048 -0.058211457f, -0.051837247f, -0.01349775f, -0.04639988f,
1049 -0.035936575f, -0.011681591f, 0.064818054f, 0.0073146066f,
1050 -0.021745546f, -0.043124277f, -0.06471268f, -0.07053354f,
1051 -0.029321948f, -0.05330136f, 0.016933719f, -0.053782392f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001052 0.13747959f, -0.1361751f, -0.11569455f, 0.0033329215f,
1053 0.05693899f, -0.053219706f, 0.063698f, 0.07977434f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001054 -0.07924483f, 0.06936997f, 0.0034815092f, -0.007305279f,
1055 -0.037325785f, -0.07251102f, -0.033633437f, -0.08677009f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001056 0.091591336f, -0.14165086f, 0.021752775f, 0.019683983f,
1057 0.0011612234f, -0.058154266f, 0.049996935f, 0.0288841f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001058 -0.0024567875f, -0.14345716f, 0.010955264f, -0.10234828f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001059 0.1183656f, -0.0010731248f, -0.023590032f, -0.072285876f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001060 -0.0724771f, -0.026382286f, -0.0014920527f, 0.042667855f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001061 0.0018776858f, 0.02986552f, 0.009814309f, 0.0733756f,
1062 0.12289186f, 0.018043943f, -0.0458958f, 0.049412545f,
1063 0.033632483f, 0.05495232f, 0.036686596f, -0.013781798f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001064 -0.010036754f, 0.02576849f, -0.08307328f, 0.010112348f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001065 0.042521734f, -0.05869831f, -0.071689695f, 0.03876447f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001066 -0.13275425f, -0.0352966f, -0.023077697f, 0.10285965f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001067 0.084736146f, 0.15568255f, -0.00040734606f, 0.027835453f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001068 -0.10292561f, -0.032401145f, 0.10053256f, -0.026142767f,
1069 -0.08271222f, -0.0030240538f, -0.016368777f, 0.1070414f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001070 0.042672627f, 0.013456989f, -0.0437609f, -0.022309763f,
1071 0.11576483f, 0.04108048f, 0.061026827f, -0.0190714f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001072 -0.0869359f, 0.037901703f, 0.0610107f, 0.07202949f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001073 0.01675338f, 0.086139716f, -0.08795751f, -0.014898893f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001074 -0.023771819f, -0.01965048f, 0.007955471f, -0.043740474f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001075 0.03346837f, -0.10549954f, 0.090567775f, 0.042013682f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001076 -0.03176985f, 0.12569028f, -0.02421228f, -0.029526481f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001077 0.023851605f, 0.031539805f, 0.05292009f, -0.02344001f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001078 -0.07811758f, -0.08834428f, 0.10094801f, 0.16594367f,
1079 -0.06861939f, -0.021256343f, -0.041093912f, -0.06669611f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001080 0.035498552f, 0.021757556f, -0.09302526f, -0.015403468f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001081 -0.06614931f, -0.051798206f, -0.013874718f, 0.03630673f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001082 0.010412845f, -0.08077351f, 0.046185967f, 0.0035662893f,
1083 0.03541868f, -0.094149634f, -0.034814864f, 0.003128424f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001084 -0.020674974f, -0.03944324f, -0.008110165f, -0.11113267f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001085 0.08484226f, 0.043586485f, 0.040582247f, 0.0968012f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001086 -0.065249965f, -0.028036479f, 0.0050708856f, 0.0017462453f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001087 0.0326779f, 0.041296225f, 0.09164146f, -0.047743853f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001088 -0.015952192f, -0.034451712f, 0.084197424f, -0.05347844f,
1089 -0.11768019f, 0.085926116f, -0.08251791f, -0.045081906f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001090 0.0948852f, 0.068401024f, 0.024856757f, 0.06978981f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001091 -0.057309967f, -0.012775832f, -0.0032452994f, 0.01977615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001092 -0.041040014f, -0.024264973f, 0.063464895f, 0.05431621f
1093 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001094 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001095 hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
1096 std::vector<float> cellToInputWeightsValue
1097 {
1098 0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
1099 -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
1100 -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f, -0.052169047f,
1101 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f
1102 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001103 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001104 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1105 std::vector<float> cellToForgetWeightsValue
1106 {
1107 -0.01998659f, -0.15568835f, -0.24248174f, -0.012770197f, 0.041331276f,
1108 -0.072311886f, -0.052123554f, -0.0066330447f, -0.043891653f, 0.036225766f,
1109 -0.047248036f, 0.021479502f, 0.033189066f, 0.11952997f, -0.020432774f,
1110 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f
1111 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001112 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001113 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1114 std::vector<float> cellToOutputWeightsValue
1115 {
1116 0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
1117 -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
1118 -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
1119 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f
1120 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001121 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001122 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
1123 std::vector<float> inputGateBiasValue
1124 {
1125 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
1126 -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
1127 -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
1128 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f
1129 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001130 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001131 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1132 std::vector<float> forgetGateBiasValue
1133 {
1134 0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f, 0.11098921f,
1135 0.15378423f, 0.09263801f, 0.09790885f, 0.09508917f, 0.061199076f,
1136 0.07665568f, -0.015443159f, -0.03499149f, 0.046190713f, 0.08895977f,
1137 0.10899629f, 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f
1138 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001139 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001140 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1141 std::vector<float> cellBiasValue
1142 {
1143 -0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f, -0.1483596f,
1144 -0.10639995f, -0.091433935f, 0.058573797f, -0.06809782f, -0.07889636f,
1145 -0.043246906f, -0.09829136f, -0.4279842f, 0.034901652f, 0.18797937f,
1146 0.0075234566f, 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f
1147 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001148 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001149 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1150 std::vector<float> outputGateBiasValue
1151 {
1152 0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
1153 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
1154 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
1155 -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f
1156 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001157 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1158 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001159 hidl_vec<uint32_t> projectionWeightsDimensions{outputSize, numUnits};
1160 std::vector<float> projectionWeightsValue
1161 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001162 -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001163 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001164 -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
1165 -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001166 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
1167 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
1168 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
1169 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001170 -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
1171 -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
1172 -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001173 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
1174 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
1175 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
1176 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
1177 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001178 -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001179 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001180 -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001181 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001182 -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
1183 -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001184 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001185 -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001186 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001187 -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
1188 -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001189 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001190 -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
1191 -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
1192 -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001193 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
1194 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001195 -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001196 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
1197 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
1198 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
1199 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
1200 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001201 -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
1202 -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001203 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001204 -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
1205 -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001206 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
1207 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
1208 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001209 -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
1210 -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
1211 -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001212 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001213 -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001214 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
1215 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001216 -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
1217 -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
1218 -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001219 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001220 -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
1221 -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
1222 -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001223 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
1224 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
1225 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f
1226 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001227 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001228 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
1229 std::vector<float> projectionBiasValue(outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001230
1231 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001232 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1233 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001234 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001235 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1236 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001237
Matteo Martincighc7434122018-11-14 12:27:04 +00001238 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +01001239 // 20: The activation function: A value indicating the activation function:
1240 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +00001241 hidl_vec<uint32_t> activationFunctionDimensions{};
1242 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +01001243 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1244 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001245 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1246 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001247 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1248 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001249 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1250 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001251
1252 // Outputs:
1253 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1254 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +00001255 // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
1256 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1257 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1258 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1259 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
1260 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001261 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001262 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1263 std::vector<float> outputStateOutValue
1264 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001265 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835577f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001266 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415119f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001267 -0.013869f, 0.0287268f, -0.00334694f, 0.00733397f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001268 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.0216801f
1269 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001270 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001271 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1272 std::vector<float> cellStateOutValue
1273 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001274 -0.0531632f, -0.0118138f, 0.0870833f, 0.0347929f, -0.076144f,
1275 -0.0659219f, -0.0463811f, 0.0141307f, -0.0127706f, -0.03782f,
1276 -0.00402401f, -0.00571876f, -0.187957f, -0.0247127f, 0.0711425f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001277 0.008244f, 0.0492649f, 0.126972f, 0.0933097f, 0.29848f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001278 -0.0966178f, -0.114417f, 0.0387229f, 0.0453255f, -0.181286f,
1279 -0.0651251f, -0.0996879f, -0.00276995f, 0.0617558f, -0.0100728f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001280 0.056304f, -0.077416f, -0.162858f, -0.0541251f, 0.0571202f,
1281 -0.0525331f, 0.0724297f, 0.171029f, 0.141738f, 0.295483f
1282 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001283 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1284 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +00001285 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1286 std::vector<float> outputValue
1287 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001288 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001289 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001290 -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001291 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f
1292 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001293
1294 LstmTestImpl(inputDimensions, inputValue,
1295 inputToInputWeightsDimensions, inputToInputWeightsValue,
1296 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1297 inputToCellWeightsDimensions, inputToCellWeightsValue,
1298 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1299 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1300 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1301 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1302 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1303 cellToInputWeightsDimensions, cellToInputWeightsValue,
1304 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1305 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1306 inputGateBiasDimensions, inputGateBiasValue,
1307 forgetGateBiasDimensions, forgetGateBiasValue,
1308 cellBiasDimensions, cellBiasValue,
1309 outputGateBiasDimensions, outputGateBiasValue,
1310 projectionWeightsDimensions, projectionWeightsValue,
1311 projectionBiasDimensions, projectionBiasValue,
1312 outputStateInDimensions, outputStateInValue,
1313 cellStateInDimensions, cellStateInValue,
1314 activationFunctionDimensions, activationFunctionValue,
1315 cellClippingThresholdDimensions, cellClippingThresholdValue,
1316 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1317 scratchBufferDimensions, scratchBufferValue,
1318 outputStateOutDimensions, outputStateOutValue,
1319 cellStateOutDimensions, cellStateOutValue,
Matteo Martincighc7434122018-11-14 12:27:04 +00001320 outputDimensions, outputValue,
1321 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +01001322}
1323
// Builds and runs the CIFG + peephole, no-projection LSTM test with batch size 2.
// CIFG (coupled input-forget gate) is expressed by passing empty ({0}-shaped) input-gate
// tensors (operands 01, 05 and 12); peephole by populated cell-to-forget/cell-to-output
// weights (operands 10 and 11); "no projection" by empty projection weights/bias
// (operands 16 and 17). All weights/biases/scalars are supplied as CONSTANT_COPY operands.
void LstmCifgPeepholeNoProjectionBatch2(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
    // The batch size has been increased to 2 (it was 1 in the VTS test) with appropriate input and output values added.

    uint32_t batchSize = 2;
    uint32_t inputSize = 2;
    uint32_t numUnits = 4;
    // With no projection layer the output size equals the number of cell units.
    uint32_t outputSize = numUnits;

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t>   inputDimensions{batchSize, inputSize};
    std::vector<float>   inputValue{2.0f, 3.0f, 3.0f, 4.0f};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    //     Empty here: omitted because this is the CIFG variant.
    hidl_vec<uint32_t>   inputToInputWeightsDimensions{0};
    std::vector<float>   inputToInputWeightsValue;
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t>   inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float>   inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
                                                    0.13056988f, -0.36333650f,
                                                   -0.22755712f,  0.28253698f,
                                                    0.24407166f,  0.33826375f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t>   inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float>   inputToCellWeightsValue{-0.49770179f, -0.27711356f,
                                                 -0.09624726f,  0.05100781f,
                                                  0.04717243f,  0.48944736f,
                                                 -0.38535351f, -0.17212132f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t>   inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float>   inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
                                                   -0.55932593f, -0.09426838f,
                                                   -0.44257352f,  0.54939759f,
                                                    0.01533556f,  0.42751634f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    //     Empty (CIFG variant).
    hidl_vec<uint32_t>   recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
    std::vector<float>   recurrentToInputWeightsValue;
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t>   recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float>   recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
                                                       -0.14340827f,  0.36986142f,  0.23414481f,  0.55899000f,
                                                        0.10798943f, -0.41174671f,  0.17751795f, -0.34484994f,
                                                       -0.35874045f, -0.11352962f,  0.27268326f,  0.54058349f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t>   recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float>   recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
                                                      0.42957711f,  0.01841056f, -0.32764608f, -0.33027974f,
                                                     -0.10826075f,  0.20675004f,  0.19069612f, -0.03026325f,
                                                     -0.54532051f,  0.33003211f,  0.44901288f,  0.21193194f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t>   recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float>   recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
                                                       0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
                                                       0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
                                                       0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Empty (CIFG variant - no input gate, so no peephole into it).
    hidl_vec<uint32_t>   cellToInputWeightsDimensions{0};
    std::vector<float>   cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Populated: this is the peephole variant.
    hidl_vec<uint32_t>   cellToForgetWeightsDimensions{numUnits};
    std::vector<float>   cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t>   cellToOutputWeightsDimensions{numUnits};
    std::vector<float>   cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Empty (CIFG variant).
    hidl_vec<uint32_t>   inputGateBiasDimensions{0}; // VTS was {4} -> {0} ?
    std::vector<float>   inputGateBiasValue;
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t>   forgetGateBiasDimensions{numUnits};
    std::vector<float>   forgetGateBiasValue{1.0f, 1.0f, 1.0f, 1.0f};
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t>   cellBiasDimensions{numUnits};
    std::vector<float>   cellBiasValue(numUnits, 0.0f);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t>   outputGateBiasDimensions{numUnits};
    std::vector<float>   outputGateBiasValue(numUnits, 0.0f);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units]. Empty: no projection layer in this variant.
    hidl_vec<uint32_t>   projectionWeightsDimensions{0};
    std::vector<float>   projectionWeightsValue;
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t>   projectionBiasDimensions{0};
    std::vector<float>   projectionBiasValue;

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    //     Zero-initialised: the recurrence starts from a blank state.
    hidl_vec<uint32_t>   outputStateInDimensions{batchSize, outputSize};
    std::vector<float>   outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t>   cellStateInDimensions{batchSize, numUnits};
    std::vector<float>   cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid. (4 = Tanh here.)
    hidl_vec<uint32_t>   activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4};
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t>   cellClippingThresholdDimensions{};
    std::vector<float>   cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t>   projectionClippingThresholdDimensions{};
    std::vector<float>   projectionClippingThresholdValue{0.0f};

    // Outputs:
    //  0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
    //     with CIFG, or [batch_size, num_units * 3] without CIFG.
    //     HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
    //     Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //               android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //               tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    //     (Hence numUnits * 3 below, since this is the CIFG variant.)
    hidl_vec<uint32_t>   scratchBufferDimensions{batchSize, numUnits * 3};
    std::vector<float>   scratchBufferValue(batchSize * numUnits * 3, 0.0f);
    //  1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t>   outputStateOutDimensions{batchSize, outputSize};
    std::vector<float>   outputStateOutValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
                                             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
    //  2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t>   cellStateOutDimensions{batchSize, numUnits};
    std::vector<float>   cellStateOutValue{-0.76044439f, -0.01804161f, 0.18226376f, -0.06493707f,
                                           -0.90477051f, -0.04355603f, 0.18475688f, -0.04158677f};
    //  3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    hidl_vec<uint32_t>   outputDimensions{batchSize, outputSize};
    std::vector<float>   outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
                                     -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};

    // Build the model from the operands above, execute it on the requested backend and
    // compare the outputs against the expected values (see LstmTestImpl earlier in this file).
    LstmTestImpl(inputDimensions,                       inputValue,
                 inputToInputWeightsDimensions,         inputToInputWeightsValue,
                 inputToForgetWeightsDimensions,        inputToForgetWeightsValue,
                 inputToCellWeightsDimensions,          inputToCellWeightsValue,
                 inputToOutputWeightsDimensions,        inputToOutputWeightsValue,
                 recurrentToInputWeightsDimensions,     recurrentToInputWeightsValue,
                 recurrentToForgetWeightsDimensions,    recurrentToForgetWeightsValue,
                 recurrentToCellWeightsDimensions,      recurrentToCellWeightsValue,
                 recurrentToOutputWeightsDimensions,    recurrentToOutputWeightsValue,
                 cellToInputWeightsDimensions,          cellToInputWeightsValue,
                 cellToForgetWeightsDimensions,         cellToForgetWeightsValue,
                 cellToOutputWeightsDimensions,         cellToOutputWeightsValue,
                 inputGateBiasDimensions,               inputGateBiasValue,
                 forgetGateBiasDimensions,              forgetGateBiasValue,
                 cellBiasDimensions,                    cellBiasValue,
                 outputGateBiasDimensions,              outputGateBiasValue,
                 projectionWeightsDimensions,           projectionWeightsValue,
                 projectionBiasDimensions,              projectionBiasValue,
                 outputStateInDimensions,               outputStateInValue,
                 cellStateInDimensions,                 cellStateInValue,
                 activationFunctionDimensions,          activationFunctionValue,
                 cellClippingThresholdDimensions,       cellClippingThresholdValue,
                 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
                 scratchBufferDimensions,               scratchBufferValue,
                 outputStateOutDimensions,              outputStateOutValue,
                 cellStateOutDimensions,                cellStateOutValue,
                 outputDimensions,                      outputValue,
                 compute);
}
1494
// Dataset of backends each data-driven LSTM test case below is run against
// (reference CPU backend and the GPU-accelerated backend).
static const boost::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
1496
// Runs the no-CIFG / no-peephole / no-projection LSTM test once per backend in
// COMPUTE_DEVICES; 'sample' is the dataset element supplied by BOOST_DATA_TEST_CASE.
BOOST_DATA_TEST_CASE(LstmNoCifgNoPeepholeNoProjectionTest, COMPUTE_DEVICES)
{
    LstmNoCifgNoPeepholeNoProjection(sample);
}
1501
// Runs the CIFG + peephole / no-projection LSTM test once per backend in
// COMPUTE_DEVICES; 'sample' is the dataset element supplied by BOOST_DATA_TEST_CASE.
BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionTest, COMPUTE_DEVICES)
{
    LstmCifgPeepholeNoProjection(sample);
}
1506
// Runs the no-CIFG / peephole / projection LSTM test once per backend in
// COMPUTE_DEVICES; 'sample' is the dataset element supplied by BOOST_DATA_TEST_CASE.
BOOST_DATA_TEST_CASE(LstmNoCifgPeepholeProjectionTest, COMPUTE_DEVICES)
{
    LstmNoCifgPeepholeProjection(sample);
}
1511
// Runs the CIFG + peephole / no-projection, batch-size-2 LSTM test once per backend in
// COMPUTE_DEVICES; 'sample' is the dataset element supplied by BOOST_DATA_TEST_CASE.
BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionBatch2Test, COMPUTE_DEVICES)
{
    LstmCifgPeepholeNoProjectionBatch2(sample);
}
1516
1517BOOST_AUTO_TEST_SUITE_END()