blob: ea0405c67323e6e91ee8a3d57404960ca6e9444e [file] [log] [blame]
telsoa01ce3e84a2018-08-31 09:31:35 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beck93e48982018-09-05 13:05:09 +01003// SPDX-License-Identifier: MIT
telsoa01ce3e84a2018-08-31 09:31:35 +01004//
5#include "DriverTestHelpers.hpp"
Matteo Martincighc7434122018-11-14 12:27:04 +00006#include "OperationsUtils.h"
7
8#include <boost/array.hpp>
telsoa01ce3e84a2018-08-31 09:31:35 +01009#include <boost/test/unit_test.hpp>
Matteo Martincighc7434122018-11-14 12:27:04 +000010#include <boost/test/data/test_case.hpp>
telsoa01ce3e84a2018-08-31 09:31:35 +010011#include <boost/math/special_functions/relative_difference.hpp>
12#include <log/log.h>
13
telsoa01ce3e84a2018-08-31 09:31:35 +010014#include <cmath>
15
16BOOST_AUTO_TEST_SUITE(LstmTests)
17
18using ArmnnDriver = armnn_driver::ArmnnDriver;
19using DriverOptions = armnn_driver::DriverOptions;
20using namespace driverTestHelpers;
21using namespace android::hardware;
22
23namespace
24{
25
26template<typename T>
Matteo Martincighc7434122018-11-14 12:27:04 +000027RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
telsoa01ce3e84a2018-08-31 09:31:35 +010028{
29 DataLocation inputInloc = {};
30 inputInloc.poolIndex = poolIndex;
31 inputInloc.offset = 0;
32 inputInloc.length = value.size() * sizeof(T);
33 RequestArgument inputRequestArgument = {};
34 inputRequestArgument.location = inputInloc;
35 inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
36 return inputRequestArgument;
37}
38
39// Returns true if the relative difference between two float values is less than the tolerance value given.
40// This is used because the floating point comparison tolerance (set on each BOOST_AUTO_TEST_CASE) does not work!
41bool TolerantCompareEqual(float a, float b, float tolerance = 0.00001f)
42{
43 float rd;
44 if (a == 0.0f)
45 {
46 rd = fabs(b);
47 }
48 else if (b == 0.0f)
49 {
50 rd = fabs(a);
51 }
52 else
53 {
54 rd = boost::math::relative_difference(a, b);
55 }
56 return rd < tolerance;
57}
58
Matteo Martincighc7434122018-11-14 12:27:04 +000059} // anonymous namespace
telsoa01ce3e84a2018-08-31 09:31:35 +010060
61// Add our own tests here since we fail the lstm tests which Google supplies (because of non-const weights)
62
// Generic harness for the LSTM tests in this file.
//
// Builds an android.hardware.neuralnetworks V1_0 model containing a single LSTM operation,
// with all weights, biases and scalar parameters supplied as constant tensors (unlike the
// VTS tests, which pass them as model inputs). The model is then prepared with an ArmNN
// driver for the requested 'compute' backend, executed, and the outputStateOut, cellStateOut
// and output results are compared element-wise against the expected values using
// TolerantCompareEqual. The scratch buffer output is allocated but its contents are not
// validated (its values are implementation-defined).
//
// Operand indices follow the NNAPI LSTM specification: inputs are operands 0-22,
// outputs are operands 23-26.
void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                  const std::vector<float>& inputValue,
                  const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                  const std::vector<float>& inputToInputWeightsValue,
                  const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                  const std::vector<float>& inputToForgetWeightsValue,
                  const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                  const std::vector<float>& inputToCellWeightsValue,
                  const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                  const std::vector<float>& inputToOutputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                  const std::vector<float>& recurrentToInputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                  const std::vector<float>& recurrentToForgetWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                  const std::vector<float>& recurrentToCellWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                  const std::vector<float>& recurrentToOutputWeightsValue,
                  const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
                  const std::vector<float>& cellToInputWeightsValue,
                  const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
                  const std::vector<float>& cellToForgetWeightsValue,
                  const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
                  const std::vector<float>& cellToOutputWeightsValue,
                  const hidl_vec<uint32_t>& inputGateBiasDimensions,
                  const std::vector<float>& inputGateBiasValue,
                  const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                  const std::vector<float>& forgetGateBiasValue,
                  const hidl_vec<uint32_t>& cellBiasDimensions,
                  const std::vector<float>& cellBiasValue,
                  const hidl_vec<uint32_t>& outputGateBiasDimensions,
                  const std::vector<float>& outputGateBiasValue,
                  const hidl_vec<uint32_t>& projectionWeightsDimensions,
                  const std::vector<float>& projectionWeightsValue,
                  const hidl_vec<uint32_t>& projectionBiasDimensions,
                  const std::vector<float>& projectionBiasValue,
                  const hidl_vec<uint32_t>& outputStateInDimensions,
                  const std::vector<float>& outputStateInValue,
                  const hidl_vec<uint32_t>& cellStateInDimensions,
                  const std::vector<float>& cellStateInValue,
                  const hidl_vec<uint32_t>& activationFunctionDimensions,
                  const std::vector<int32_t>& activationFunctionValue,
                  const hidl_vec<uint32_t>& cellClippingThresholdDimensions,
                  const std::vector<float>& cellClippingThresholdValue,
                  const hidl_vec<uint32_t>& projectionClippingThresholdDimensions,
                  const std::vector<float>& projectionClippingThresholdValue,
                  const hidl_vec<uint32_t>& scratchBufferDimensions,
                  const std::vector<float>& scratchBufferValue,
                  const hidl_vec<uint32_t>& outputStateOutDimensions,
                  const std::vector<float>& outputStateOutValue,
                  const hidl_vec<uint32_t>& cellStateOutDimensions,
                  const std::vector<float>& cellStateOutValue,
                  const hidl_vec<uint32_t>& outputDimensions,
                  const std::vector<float>& outputValue,
                  armnn::Compute compute)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
    V1_0::Model model = {};

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    AddInputOperand(model, inputDimensions);

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    AddTensorOperand(model, inputToInputWeightsDimensions, inputToInputWeightsValue);
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    AddTensorOperand(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    AddTensorOperand(model, recurrentToInputWeightsDimensions, recurrentToInputWeightsValue);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, cellToInputWeightsDimensions, cellToInputWeightsValue);
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, cellToForgetWeightsDimensions, cellToForgetWeightsValue);
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, cellToOutputWeightsDimensions, cellToOutputWeightsValue);
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, inputGateBiasDimensions, inputGateBiasValue);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, forgetGateBiasDimensions, forgetGateBiasValue);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, cellBiasDimensions, cellBiasValue);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand(model, outputGateBiasDimensions, outputGateBiasValue);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    AddTensorOperand(model, projectionWeightsDimensions, projectionWeightsValue);
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    AddTensorOperand(model, projectionBiasDimensions, projectionBiasValue);

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddInputOperand(model, outputStateInDimensions);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddInputOperand(model, cellStateInDimensions);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    AddTensorOperand(model, activationFunctionDimensions,
                     activationFunctionValue, OperandType::INT32);
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    AddTensorOperand(model, cellClippingThresholdDimensions,
                     cellClippingThresholdValue, OperandType::FLOAT32);
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    AddTensorOperand(model, projectionClippingThresholdDimensions,
                     projectionClippingThresholdValue, OperandType::FLOAT32);

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    AddOutputOperand(model, scratchBufferDimensions);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddOutputOperand(model, outputStateOutDimensions);
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddOutputOperand(model, cellStateOutDimensions);
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    AddOutputOperand(model, outputDimensions);

    // make the lstm operation
    model.operations.resize(1);
    model.operations[0].type = V1_0::OperationType::LSTM;
    model.operations[0].inputs =
        hidl_vec<uint32_t> {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
    model.operations[0].outputs = hidl_vec<uint32_t> {23, 24, 25, 26};

    // define the input values
    // Only operands 0, 18 and 19 are model inputs at runtime (everything else is constant),
    // so only three request arguments are needed, in pools 0-2.
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    inputArguments[0] = CreateRequestArgument<float>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<float>(outputStateInValue, 1);
    inputArguments[2] = CreateRequestArgument<float>(cellStateInValue, 2);

    // define the expected output values
    // Output pools continue from the input pools: pools 3-6.
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(4);

    outputArguments[0] = CreateRequestArgument<float>(scratchBufferValue, 3);
    outputArguments[1] = CreateRequestArgument<float>(outputStateOutValue, 4);
    outputArguments[2] = CreateRequestArgument<float>(cellStateOutValue, 5);
    outputArguments[3] = CreateRequestArgument<float>(outputValue, 6);

    Request request = {};
    request.inputs = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(outputStateInValue.size(), request, outputStateInValue.data());
    AddPoolAndSetData(cellStateInValue.size(), request, cellStateInValue.data());

    // add memory for the outputs
    // The scratch buffer memory is added to keep the pool indices consistent, but its
    // contents are never checked.
    AddPoolAndGetData(scratchBufferValue.size(), request);
    android::sp<IMemory> outputStateOutMemory = AddPoolAndGetData(outputStateOutValue.size(), request);
    float* outputStateOutData = static_cast<float*>(static_cast<void*>(outputStateOutMemory->getPointer()));
    android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData(cellStateOutValue.size(), request);
    float* cellStateOutData = static_cast<float*>(static_cast<void*>(cellStateOutMemory->getPointer()));
    android::sp<IMemory> outputMemory = AddPoolAndGetData(outputValue.size(), request);
    float* outputData = static_cast<float*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }

    // check the results
    // NOTE(review): if preparation failed, preparedModel is null and the comparisons below run
    // against the unmodified output pools — presumably PrepareModel itself reports the failure
    // via Boost; verify against DriverTestHelpers.
    for (size_t i = 0; i < outputStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
                   "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
    }
    for (size_t i = 0; i < cellStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]),
                   "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
    }
    for (size_t i = 0; i < outputValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
    }
}
268
// LSTM test case: no CIFG (input gate present), no peephole connections, no projection layer.
// All expected output values below are reference constants from the NNAPI generated tests;
// do not reformat or "clean up" the numeric literals.
void LstmNoCifgNoPeepholeNoProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).

    uint32_t batchSize = 1;
    uint32_t inputSize = 2;
    uint32_t numUnits = 4;
    uint32_t outputSize = numUnits;

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{2.0f, 3.0f};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToInputWeightsValue{-0.45018822f, -0.02338299f,
                                                -0.08705890f, -0.34550029f,
                                                 0.04266912f, -0.15680569f,
                                                -0.34856534f,  0.43890524f};
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{ 0.09701663f,  0.20334584f,
                                                 -0.50592935f, -0.31343272f,
                                                 -0.40032279f,  0.44781327f,
                                                  0.01387155f, -0.35593212f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.50013041f,  0.13702840f,
                                                0.11810488f,  0.20131630f,
                                               -0.20583314f,  0.44344562f,
                                                0.22077113f, -0.29909778f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{-0.25065863f, -0.28290087f,
                                                  0.04613829f,  0.40525138f,
                                                  0.44272184f,  0.03897077f,
                                                 -0.15568960f,  0.19487578f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToInputWeightsValue{-0.00635350f, -0.20423880f,  0.31454784f, -0.35746509f,
                                                     0.28902304f,  0.08183324f, -0.16555229f,  0.02286911f,
                                                    -0.13566875f,  0.03034258f,  0.48091322f, -0.12528998f,
                                                     0.24077177f, -0.51332325f, -0.33502164f,  0.10629296f};
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.48684245f, -0.06655136f,  0.42224967f,  0.21126390f,
                                                      0.27654213f,  0.20864892f, -0.07646349f,  0.45877004f,
                                                      0.00141793f, -0.14609534f,  0.36447752f,  0.09196436f,
                                                      0.28053468f,  0.01560611f, -0.20127171f, -0.01140004f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{-0.34074140f,  0.24443203f, -0.20785320f,  0.26320225f,
                                                    0.05695659f, -0.00123841f, -0.47447860f, -0.35869038f,
                                                   -0.06418842f, -0.13502428f, -0.50176400f,  0.22830659f,
                                                   -0.46367589f,  0.26016325f, -0.03894562f, -0.16368064f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{ 0.43385774f, -0.17194885f,  0.27182370f,  0.09215671f,
                                                      0.24107647f, -0.39835793f,  0.18212086f,  0.01301402f,
                                                      0.48572797f, -0.50656658f,  0.20047462f, -0.20607421f,
                                                     -0.51818722f, -0.15390486f,  0.04681480f,  0.39922136f};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    // Not used in this test: empty tensors (dim {0}) signal "omitted" to the driver.
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<float> cellToForgetWeightsValue;
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<float> cellToOutputWeightsValue;
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
    std::vector<float> inputGateBiasValue(numUnits, 0.0f);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue(numUnits, 0.0f);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue(numUnits, 0.0f);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    hidl_vec<uint32_t> projectionWeightsDimensions{0};
    std::vector<float> projectionWeightsValue;
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<float> projectionBiasValue;

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4};
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f};

    // Outputs:
    //  0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
    //     with CIFG, or [batch_size, num_units * 3] without CIFG.
    // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
    // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //           android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //           tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
    std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
    //  1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<float> outputStateOutValue {-0.0297319f, 0.122947f, 0.208851f, -0.153588f};
    //  2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<float> cellStateOutValue {-0.145439f, 0.157475f, 0.293663f, -0.277353f};
    //  3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<float> outputValue {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f};

    LstmTestImpl(inputDimensions, inputValue,
                 inputToInputWeightsDimensions, inputToInputWeightsValue,
                 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                 inputToCellWeightsDimensions, inputToCellWeightsValue,
                 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                 cellToInputWeightsDimensions, cellToInputWeightsValue,
                 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                 inputGateBiasDimensions, inputGateBiasValue,
                 forgetGateBiasDimensions, forgetGateBiasValue,
                 cellBiasDimensions, cellBiasValue,
                 outputGateBiasDimensions, outputGateBiasValue,
                 projectionWeightsDimensions, projectionWeightsValue,
                 projectionBiasDimensions, projectionBiasValue,
                 outputStateInDimensions, outputStateInValue,
                 cellStateInDimensions, cellStateInValue,
                 activationFunctionDimensions, activationFunctionValue,
                 cellClippingThresholdDimensions, cellClippingThresholdValue,
                 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
                 scratchBufferDimensions, scratchBufferValue,
                 outputStateOutDimensions, outputStateOutValue,
                 cellStateOutDimensions, cellStateOutValue,
                 outputDimensions, outputValue,
                 compute);
}
441
Matteo Martincighc7434122018-11-14 12:27:04 +0000442void LstmCifgPeepholeNoProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +0100443{
444 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
445 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
446 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
447
Matteo Martincighc7434122018-11-14 12:27:04 +0000448 uint32_t batchSize = 1;
449 uint32_t inputSize = 2;
450 uint32_t numUnits = 4;
451 uint32_t outputSize = numUnits;
452
telsoa01ce3e84a2018-08-31 09:31:35 +0100453 // Inputs:
454 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
455 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +0000456 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
457 std::vector<float> inputValue{2.0f, 3.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100458
459 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
460 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +0000461 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
462 std::vector<float> inputToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100463 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
464 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000465 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
466 std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
467 0.13056988f, -0.36333650f,
468 -0.22755712f, 0.28253698f,
469 0.24407166f, 0.33826375f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100470 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000471 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
472 std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
473 -0.09624726f, 0.05100781f,
474 0.04717243f, 0.48944736f,
475 -0.38535351f, -0.17212132f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100476 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
477 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000478 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
479 std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
480 -0.55932593f, -0.09426838f,
481 -0.44257352f, 0.54939759f,
482 0.01533556f, 0.42751634f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100483 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
484 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
485 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +0000486 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
487 std::vector<float> recurrentToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100488 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
489 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000490 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
491 std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
492 -0.14340827f, 0.36986142f, 0.23414481f, 0.55899000f,
493 0.10798943f, -0.41174671f, 0.17751795f, -0.34484994f,
494 -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100495 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
496 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000497 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
498 std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
499 0.42957711f, 0.01841056f, -0.32764608f, -0.33027974f,
500 -0.10826075f, 0.20675004f, 0.19069612f, -0.03026325f,
501 -0.54532051f, 0.33003211f, 0.44901288f, 0.21193194f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100502 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
503 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000504 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
505 std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
506 0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
507 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
508 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100509 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000510 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
511 std::vector<float> cellToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100512 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000513 hidl_vec<uint32_t> cellToForgetWeightsDimensions{4};
514 std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100515 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000516 hidl_vec<uint32_t> cellToOutputWeightsDimensions{4};
517 std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100518 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000519 hidl_vec<uint32_t> inputGateBiasDimensions{0}; // VTS was {4} -> {0} ?
520 std::vector<float> inputGateBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100521 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000522 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
523 std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100524 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000525 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
526 std::vector<float> cellBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100527 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000528 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
529 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100530 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
531 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000532 hidl_vec<uint32_t> projectionWeightsDimensions{0};
533 std::vector<float> projectionWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100534 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000535 hidl_vec<uint32_t> projectionBiasDimensions{0};
536 std::vector<float> projectionBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100537
538 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000539 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
540 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100541 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000542 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
543 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100544
Matteo Martincighc7434122018-11-14 12:27:04 +0000545 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +0100546 // 20: The activation function: A value indicating the activation function:
547 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +0000548 hidl_vec<uint32_t> activationFunctionDimensions{};
549 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +0100550 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
551 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000552 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
553 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100554 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
555 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000556 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
557 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100558
559 // Outputs:
560 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
561 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +0000562 // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
563 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
564 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
565 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
566 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
567 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100568 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000569 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
570 std::vector<float> outputStateOutValue{-0.364445f, -0.00352185f, 0.128866f, -0.0516365f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100571 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000572 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
573 std::vector<float> cellStateOutValue{-0.760444f, -0.0180416f, 0.182264f, -0.0649371f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100574 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
575 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +0000576 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
577 std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100578
579 LstmTestImpl(inputDimensions, inputValue,
580 inputToInputWeightsDimensions, inputToInputWeightsValue,
581 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
582 inputToCellWeightsDimensions, inputToCellWeightsValue,
583 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
584 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
585 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
586 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
587 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
588 cellToInputWeightsDimensions, cellToInputWeightsValue,
589 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
590 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
591 inputGateBiasDimensions, inputGateBiasValue,
592 forgetGateBiasDimensions, forgetGateBiasValue,
593 cellBiasDimensions, cellBiasValue,
594 outputGateBiasDimensions, outputGateBiasValue,
595 projectionWeightsDimensions, projectionWeightsValue,
596 projectionBiasDimensions, projectionBiasValue,
597 outputStateInDimensions, outputStateInValue,
598 cellStateInDimensions, cellStateInValue,
599 activationFunctionDimensions, activationFunctionValue,
600 cellClippingThresholdDimensions, cellClippingThresholdValue,
601 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
602 scratchBufferDimensions, scratchBufferValue,
603 outputStateOutDimensions, outputStateOutValue,
604 cellStateOutDimensions, cellStateOutValue,
Matteo Martincighc7434122018-11-14 12:27:04 +0000605 outputDimensions, outputValue,
606 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +0100607}
608
Matteo Martincighc7434122018-11-14 12:27:04 +0000609void LstmNoCifgPeepholeProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +0100610{
611 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm3.model.cpp
612 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm3.example.cpp
613 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
614
Matteo Martincighc7434122018-11-14 12:27:04 +0000615 uint32_t batchSize = 2;
616 uint32_t inputSize = 5;
617 uint32_t numUnits = 20;
618 uint32_t outputSize = 16;
619
telsoa01ce3e84a2018-08-31 09:31:35 +0100620 // Inputs:
621 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
622 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +0000623 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
624 std::vector<float> inputValue{0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
625 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100626
627 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
628 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +0000629 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
630 std::vector<float> inputToInputWeightsValue
631 {
632 0.0213936830f, 0.0612455100f, 0.0469051670f, -0.0146576770f, -0.0314946300f,
633 0.0917180300f, 0.1464780100f, 0.1079719300f, -0.0057968358f, 0.0019193048f,
634 -0.2726754000f, 0.1015402900f, -0.0185398850f, 0.0803498850f, -0.1026238500f,
635 -0.0225997870f, -0.0912115500f, -0.0086759670f, -0.0452061030f, -0.0821282000f,
636 -0.0080459520f, 0.0154780810f, 0.0552172470f, 0.0387195870f, 0.0441536270f,
637 -0.0645324300f, 0.0503182500f, -0.0469351080f, -0.0081644309f, 0.0145742260f,
638 -0.1671009000f, -0.1551955200f, -0.1681979700f, -0.1397126900f, -0.1195305900f,
639 0.2500548700f, -0.2279098300f, 0.0098550870f, -0.0281409580f, -0.1120069800f,
640 0.1129540800f, -0.0035217577f, 0.0544850750f, 0.0518469500f, 0.0647112060f,
641 0.1098919300f, 0.1167478600f, 0.0349060700f, 0.0772735700f, 0.1139058500f,
642 -0.1863375000f, -0.1034451000f, -0.1394518900f, -0.0494012270f, -0.1876706300f,
643 0.0424839030f, 0.1423355200f, 0.1383258100f, 0.1835016500f, 0.1454560300f,
644 -0.0285457040f, 0.0249395310f, 0.0509297180f, 0.0076203286f, -0.0029723682f,
645 -0.0424842240f, -0.1182759600f, -0.0917110400f, -0.1080862800f, -0.1632798800f,
646 -0.2273378000f, -0.0993647000f, -0.0171551070f, 0.0023917493f, 0.0492727640f,
647 0.0038534778f, 0.0547645050f, 0.0897537840f, 0.0694723400f, 0.0801447600f,
648 -0.0454423400f, -0.0497073000f, -0.0713563100f, -0.0489291060f, -0.0040420120f,
649 -0.0092840260f, 0.0180420540f, 0.0036860977f, -0.0742730200f, -0.1143460400f,
650 -0.0189954560f, 0.0314875430f, 0.0128349080f, 0.0199777540f, 0.0442566540f,
651 -0.3929261300f, -0.1851933400f, -0.1165128100f, -0.0680989200f, 0.0113736770f
652 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100653 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
654 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000655 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
656 std::vector<float> inputToForgetWeightsValue
657 {
658 -0.0018401089f, -0.0048522370f, 0.0369842400f, 0.0141817040f, 0.0282732360f,
659 -0.0167261940f, -0.0524975900f, -0.1020426100f, 0.0086106600f, -0.0409795050f,
660 -0.0098991870f, 0.0192389200f, -0.0281772690f, -0.0853510300f, -0.1458549500f,
661 0.1066256700f, -0.0190973100f, -0.0178835340f, -0.0047269356f, -0.0451033230f,
662 0.0030784295f, 0.0767847750f, 0.0746369600f, 0.0945313950f, 0.0814421000f,
663 -0.1225789900f, -0.0339457580f, -0.0313034650f, 0.0456306260f, 0.0684388700f,
664 -0.1349294500f, -0.0124800070f, -0.0811829000f, -0.0722449900f, -0.0962879100f,
665 0.0451009460f, 0.0012300825f, 0.0139646620f, 0.0993723940f, 0.0254305900f,
666 0.0695832400f, 0.0342572960f, 0.0482646000f, 0.0626799700f, 0.0526250680f,
667 0.1278466600f, 0.0707789700f, 0.0257259350f, 0.0416500900f, 0.0724190500f,
668 0.0186686440f, -0.0373772940f, -0.0627778300f, -0.0883363600f, -0.0401206050f,
669 -0.0114055860f, -0.0078083350f, -0.0103013860f, -0.0051021670f, 0.0277174640f,
670 0.0548342300f, 0.1144911100f, 0.1128965200f, 0.1093983900f, 0.1339650600f,
671 -0.0840216600f, -0.0190146200f, -0.0446783040f, -0.0772056500f, 0.0143500630f,
672 -0.1175795800f, -0.0652038000f, -0.0818573300f, -0.0767543240f, -0.0926143750f,
673 0.1040549100f, 0.0529603360f, 0.0357558950f, 0.0358393860f, -0.0125405530f,
674 0.0368812980f, 0.0291337600f, 0.0342015900f, 0.0544844700f, -0.0545233530f,
675 0.0258271500f, 0.0232735500f, -0.0118571790f, -0.0011980024f, -0.0346417170f,
676 -0.0261250940f, -0.1758261500f, -0.1592365700f, -0.2748677400f, -0.0006143371f,
677 0.0001771948f, -8.470171e-05f, 0.0265180700f, 0.0457907650f, 0.069564960f
678 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100679 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000680 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
681 std::vector<float> inputToCellWeightsValue
682 {
683 -0.0458028300f, -0.0954946200f, -0.0324189850f, -0.0645463300f, -0.0435284530f,
684 0.0430185870f, -0.0491523440f, -0.1241814400f, -0.0789854750f, -0.0759688900f,
685 0.0194843620f, -0.1143496200f, -0.0074034138f, -0.0631484400f, -0.0929814950f,
686 0.0062155537f, -0.0250343380f, -0.0028890965f, 0.0489295270f, 0.0623507500f,
687 0.1066591800f, -0.0320367920f, -0.0850591600f, -0.1084335800f, -0.1300243300f,
688 -0.0368164370f, -0.0213013400f, -0.0165182390f, 0.0047691227f, -0.0025825808f,
689 0.0660178660f, 0.0299915340f, -0.1065283600f, -0.1037554000f, -0.1305607100f,
690 -0.0326664300f, -0.0337024140f, -0.0064734240f, -0.0461169200f, 0.0144193390f,
691 -0.0251743230f, 0.0396852000f, 0.0817775060f, 0.0615746800f, 0.1021009500f,
692 -0.0096581940f, 0.0465117170f, 0.0360390600f, 0.0069369148f, 0.0159600950f,
693 -0.0650766600f, 0.0955159800f, 0.0535688360f, 0.0640871400f, 0.1283566700f,
694 -0.0087143290f, -0.2021196600f, -0.1209367400f, 0.0294504720f, 0.2849013000f,
695 -0.0292279010f, 0.1164364000f, -0.0856026300f, 0.0994178600f, -0.0369995650f,
696 -0.0288426260f, -0.0033637602f, -0.0170129020f, -0.0972086500f, -0.1119335100f,
697 -0.0291551170f, -0.0179360340f, -0.0097689360f, -0.0422332400f, -0.0361596350f,
698 0.0650511200f, -0.0217428920f, -0.0233772120f, -0.0722136400f, -0.0643055200f,
699 0.0545386500f, 0.0911498140f, 0.0638733100f, 0.0075183930f, 0.0559609530f,
700 0.0697793440f, 0.0464111680f, 0.1050991100f, 0.0746389400f, 0.0075130584f,
701 0.0128509820f, 0.0455543100f, 0.0569556880f, 0.0655528500f, 0.0508014560f,
702 -0.0098626830f, 0.0082677200f, -0.0265556090f, -0.0073611983f, -0.0014897042f
703 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100704 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
705 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000706 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
707 std::vector<float> inputToOutputWeightsValue
708 {
709 -0.0998932000f, -0.0720195600f, -0.0528037730f, -0.1562959300f, -0.1500191800f,
710 -0.0765075100f, 0.0235985500f, -0.0751553550f, -0.0803770900f, -0.1509353400f,
711 0.0295175520f, -0.0475139300f, 0.0103505310f, -0.0266485100f, -0.0168397220f,
712 -0.0231211630f, 0.0077019283f, 0.0128512570f, -0.0504064900f, -0.0129761000f,
713 -0.0217377470f, -0.0383057930f, -0.0687058600f, -0.0148124700f, -0.0012853940f,
714 0.1012423600f, 0.0831228350f, 0.0533130060f, -0.0622356460f, -0.0756371540f,
715 -0.0278339030f, 0.0297749710f, 0.1130802000f, 0.0921890600f, 0.0950613500f,
716 -0.0866657640f, -0.0371627060f, -0.0388809140f, -0.0358328450f, -0.0144815640f,
717 -0.0982500300f, -0.1204856900f, -0.0976655860f, -0.0528763300f, -0.0964047000f,
718 -0.1136642900f, 0.0357775050f, 0.1356881900f, 0.0524513830f, 0.0506493040f,
719 0.0579895100f, -0.0218523350f, -0.0998488440f, 0.0147404750f, -0.0788979460f,
720 0.0497469900f, 0.0141604730f, 0.0697393200f, 0.0496494200f, 0.0333646460f,
721 0.0819012400f, 0.0255353670f, 0.0508931650f, 0.0485142540f, 0.0694581300f,
722 -0.0789075640f, -0.0670761600f, -0.1184450800f, -0.0998668800f, -0.0750940300f,
723 0.0626322600f, 0.1492558700f, 0.2018843600f, 0.1209845100f, 0.1463941500f,
724 0.0015017595f, -0.0142673820f, -0.0341725700f, 0.0127114680f, 0.0028300495f,
725 -0.0247584820f, -0.0509854800f, -0.0821182000f, 0.0142256720f, 0.0215441580f,
726 0.0894972500f, 0.0750526800f, -0.0020780868f, 0.0490825800f, 0.0647629500f,
727 -0.0229070630f, 0.0275624560f, 0.0401857350f, 0.0195675770f, -0.0155987390f,
728 -0.0490973030f, -0.0171218660f, -0.0833682340f, -0.0233200200f, -0.084095600f
729 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100730 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
731 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
732 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +0000733 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
734 std::vector<float> recurrentToInputWeightsValue
735 {
telsoa01ce3e84a2018-08-31 09:31:35 +0100736 -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f, // 00
737 -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
738 -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
739 -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000740 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f, // 01
741 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100742 -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000743 0.14283475f, -0.07390571f, -0.06402044f, 0.062524505f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100744 -0.093129106f, 0.04860203f, -0.08364217f, -0.08119002f, // 02
Matteo Martincighc7434122018-11-14 12:27:04 +0000745 0.009352075f, 0.22920375f, 0.0016303885f, 0.11583097f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100746 -0.13732095f, 0.012405723f, -0.07551853f, 0.06343048f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000747 0.12162708f, -0.031923793f, -0.014335606f, 0.01790974f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100748 -0.10650317f, -0.0724401f, 0.08554849f, -0.05727212f, // 03
Matteo Martincighc7434122018-11-14 12:27:04 +0000749 0.06556731f, -0.042729504f, -0.043227166f, 0.011683251f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100750 -0.013082158f, -0.029302018f, -0.010899579f, -0.062036745f,
751 -0.022509435f, -0.00964907f, -0.01567329f, 0.04260106f,
752 -0.07787477f, -0.11576462f, 0.017356863f, 0.048673786f, // 04
753 -0.017577527f, -0.05527947f, -0.082487635f, -0.040137455f,
754 -0.10820036f, -0.04666372f, 0.022746278f, -0.07851417f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000755 0.01068115f, 0.032956902f, 0.022433773f, 0.0026891115f,
756 0.08944216f, -0.0685835f, 0.010513544f, 0.07228705f, // 05
757 0.02032331f, -0.059686817f, -0.0005566496f, -0.086984694f,
758 0.040414046f, -0.1380399f, 0.094208956f, -0.05722982f,
759 0.012092817f, -0.04989123f, -0.086576f, -0.003399834f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100760 -0.04696032f, -0.045747425f, 0.10091314f, 0.048676282f, // 06
761 -0.029037097f, 0.031399418f, -0.0040285117f, 0.047237843f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000762 0.09504992f, 0.041799378f, -0.049185462f, -0.031518843f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100763 -0.10516937f, 0.026374253f, 0.10058866f, -0.0033195973f,
764 -0.041975245f, 0.0073591834f, 0.0033782164f, -0.004325073f, // 07
765 -0.10167381f, 0.042500053f, -0.01447153f, 0.06464186f,
766 -0.017142897f, 0.03312627f, 0.009205989f, 0.024138335f,
767 -0.011337001f, 0.035530265f, -0.010912711f, 0.0706555f,
768 -0.005894094f, 0.051841937f, -0.1401738f, -0.02351249f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +0000769 0.0365468f, 0.07590991f, 0.08838724f, 0.021681072f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100770 -0.10086113f, 0.019608743f, -0.06195883f, 0.077335775f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000771 0.023646897f, -0.095322326f, 0.02233014f, 0.09756986f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100772 -0.048691444f, -0.009579111f, 0.07595467f, 0.11480546f, // 09
773 -0.09801813f, 0.019894179f, 0.08502348f, 0.004032281f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000774 0.037211012f, 0.068537936f, -0.048005626f, -0.091520436f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100775 -0.028379958f, -0.01556313f, 0.06554592f, -0.045599163f,
776 -0.01672207f, -0.020169014f, -0.011877351f, -0.20212261f, // 10
Matteo Martincighc7434122018-11-14 12:27:04 +0000777 0.010889619f, 0.0047078193f, 0.038385306f, 0.08540671f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100778 -0.017140968f, -0.0035865551f, 0.016678626f, 0.005633034f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000779 0.015963363f, 0.00871737f, 0.060130805f, 0.028611384f,
780 0.10109069f, -0.015060172f, -0.07894427f, 0.06401885f, // 11
781 0.011584063f, -0.024466386f, 0.0047652307f, -0.09041358f,
782 0.030737216f, -0.0046374933f, 0.14215417f, -0.11823516f,
783 0.019899689f, 0.006106124f, -0.027092824f, 0.0786356f,
784 0.05052217f, -0.058925f, -0.011402121f, -0.024987547f, // 12
telsoa01ce3e84a2018-08-31 09:31:35 +0100785 -0.0013661642f, -0.06832946f, -0.015667673f, -0.1083353f,
786 -0.00096863037f, -0.06988685f, -0.053350925f, -0.027275559f,
787 -0.033664223f, -0.07978348f, -0.025200296f, -0.017207067f,
788 -0.058403496f, -0.055697463f, 0.005798788f, 0.12965427f, // 13
789 -0.062582195f, 0.0013350133f, -0.10482091f, 0.0379771f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000790 0.072521195f, -0.0029455067f, -0.13797039f, -0.03628521f,
791 0.013806405f, -0.017858358f, -0.01008298f, -0.07700066f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100792 -0.017081132f, 0.019358726f, 0.0027079724f, 0.004635139f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +0000793 0.062634714f, -0.02338735f, -0.039547626f, -0.02050681f,
794 0.03385117f, -0.083611414f, 0.002862572f, -0.09421313f,
795 0.058618143f, -0.08598433f, 0.00972939f, 0.023867095f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100796 -0.053934585f, -0.023203006f, 0.07452513f, -0.048767887f, // 15
797 -0.07314807f, -0.056307215f, -0.10433547f, -0.06440842f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000798 0.04328182f, 0.04389765f, -0.020006588f, -0.09076438f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100799 -0.11652589f, -0.021705797f, 0.03345259f, -0.010329105f,
800 -0.025767034f, 0.013057034f, -0.07316461f, -0.10145612f, // 16
Matteo Martincighc7434122018-11-14 12:27:04 +0000801 0.06358255f, 0.18531723f, 0.07759293f, 0.12006465f,
802 0.1305557f, 0.058638252f, -0.03393652f, 0.09622831f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100803 -0.16253184f, -2.4580743e-06f, 0.079869635f, -0.070196845f,
804 -0.005644518f, 0.06857898f, -0.12598175f, -0.035084512f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +0000805 0.03156317f, -0.12794146f, -0.031963028f, 0.04692781f,
806 0.030070418f, 0.0071660685f, -0.095516115f, -0.004643372f,
807 0.040170413f, -0.062104587f, -0.0037324072f, 0.0554317f,
808 0.08184801f, -0.019164372f, 0.06791302f, 0.034257166f, // 18
telsoa01ce3e84a2018-08-31 09:31:35 +0100809 -0.10307039f, 0.021943003f, 0.046745934f, 0.0790918f,
810 -0.0265588f, -0.007824208f, 0.042546265f, -0.00977924f,
811 -0.0002440307f, -0.017384544f, -0.017990116f, 0.12252321f,
812 -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f, // 19
Matteo Martincighc7434122018-11-14 12:27:04 +0000813 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100814 -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000815 -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f
816 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100817 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
818 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000819 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
820 std::vector<float> recurrentToForgetWeightsValue
821 {
telsoa01ce3e84a2018-08-31 09:31:35 +0100822 -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f, // 00
Matteo Martincighc7434122018-11-14 12:27:04 +0000823 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100824 -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000825 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
826 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f, // 01
telsoa01ce3e84a2018-08-31 09:31:35 +0100827 -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
828 -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000829 0.061878487f, -0.04729229f, 0.034919553f, -0.07585433f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100830 -0.04421272f, -0.044019096f, 0.085488975f, 0.04058006f, // 02
831 -0.06890133f, -0.030951202f, -0.024628663f, -0.07672815f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000832 0.034293607f, 0.08556707f, -0.05293577f, -0.033561368f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100833 -0.04899627f, 0.0241671f, 0.015736353f, -0.095442444f,
834 -0.029564252f, 0.016493602f, -0.035026584f, 0.022337519f, // 03
835 -0.026871363f, 0.004780428f, 0.0077918363f, -0.03601621f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000836 0.016435321f, -0.03263031f, -0.09543275f, -0.047392778f,
837 0.013454138f, 0.028934088f, 0.01685226f, -0.086110644f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100838 -0.046250615f, -0.01847454f, 0.047608484f, 0.07339695f, // 04
Matteo Martincighc7434122018-11-14 12:27:04 +0000839 0.034546845f, -0.04881143f, 0.009128804f, -0.08802852f,
840 0.03761666f, 0.008096139f, -0.014454086f, 0.014361001f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100841 -0.023502491f, -0.0011840804f, -0.07607001f, 0.001856849f,
842 -0.06509276f, -0.006021153f, -0.08570962f, -0.1451793f, // 05
Matteo Martincighc7434122018-11-14 12:27:04 +0000843 0.060212336f, 0.055259194f, 0.06974018f, 0.049454916f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100844 -0.027794661f, -0.08077226f, -0.016179763f, 0.1169753f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000845 0.17213494f, -0.0056326236f, -0.053934924f, -0.0124349f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100846 -0.11520337f, 0.05409887f, 0.088759385f, 0.0019655675f, // 06
Matteo Martincighc7434122018-11-14 12:27:04 +0000847 0.0042065294f, 0.03881498f, 0.019844765f, 0.041858196f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100848 -0.05695512f, 0.047233116f, 0.038937137f, -0.06542224f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000849 0.014429736f, -0.09719407f, 0.13908425f, -0.05379757f,
850 0.012321099f, 0.082840554f, -0.029899208f, 0.044217527f, // 07
851 0.059855383f, 0.07711018f, -0.045319796f, 0.0948846f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100852 -0.011724666f, -0.0033288454f, -0.033542685f, -0.04764985f,
853 -0.13873616f, 0.040668588f, 0.034832682f, -0.015319203f,
854 -0.018715994f, 0.046002675f, 0.0599172f, -0.043107376f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +0000855 0.0294216f, -0.002314414f, -0.022424703f, 0.0030315618f,
856 0.0014641669f, 0.0029166266f, -0.11878115f, 0.013738511f,
857 0.12375372f, -0.0006038222f, 0.029104086f, 0.087442465f,
858 0.052958444f, 0.07558703f, 0.04817258f, 0.044462286f, // 09
telsoa01ce3e84a2018-08-31 09:31:35 +0100859 -0.015213451f, -0.08783778f, -0.0561384f, -0.003008196f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000860 0.047060397f, -0.002058388f, 0.03429439f, -0.018839769f,
861 0.024734668f, 0.024614193f, -0.042046934f, 0.09597743f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100862 -0.0043254104f, 0.04320769f, 0.0064070094f, -0.0019131786f, // 10
863 -0.02558259f, -0.022822596f, -0.023273505f, -0.02464396f,
864 -0.10991725f, -0.006240552f, 0.0074488563f, 0.024044557f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000865 0.04383914f, -0.046476185f, 0.028658995f, 0.060410924f,
866 0.050786525f, 0.009452605f, -0.0073054377f, -0.024810238f, // 11
867 0.0052906186f, 0.0066939713f, -0.0020913032f, 0.014515517f,
868 0.015898481f, 0.021362653f, -0.030262267f, 0.016587038f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100869 -0.011442813f, 0.041154444f, -0.007631438f, -0.03423484f,
870 -0.010977775f, 0.036152758f, 0.0066366293f, 0.11915515f, // 12
Matteo Martincighc7434122018-11-14 12:27:04 +0000871 0.02318443f, -0.041350313f, 0.021485701f, -0.10906167f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100872 -0.028218046f, -0.00954771f, 0.020531068f, -0.11995105f,
873 -0.03672871f, 0.024019798f, 0.014255957f, -0.05221243f,
874 -0.00661567f, -0.04630967f, 0.033188973f, 0.10107534f, // 13
875 -0.014027541f, 0.030796422f, -0.10270911f, -0.035999842f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000876 0.15443139f, 0.07684145f, 0.036571592f, -0.035900835f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100877 -0.0034699554f, 0.06209149f, 0.015920248f, -0.031122351f,
878 -0.03858649f, 0.01849943f, 0.13872518f, 0.01503974f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +0000879 0.069941424f, -0.06948533f, -0.0088794185f, 0.061282158f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100880 -0.047401894f, 0.03100163f, -0.041533746f, -0.10430945f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000881 0.044574402f, -0.01425562f, -0.024290353f, 0.034563623f,
882 0.05866852f, 0.023947537f, -0.09445152f, 0.035450947f, // 15
883 0.02247216f, -0.0042998926f, 0.061146557f, -0.10250651f,
884 0.020881841f, -0.06747029f, 0.10062043f, -0.0023941975f,
885 0.03532124f, -0.016341697f, 0.09685456f, -0.016764693f,
886 0.051808182f, 0.05875331f, -0.04536488f, 0.001626336f, // 16
telsoa01ce3e84a2018-08-31 09:31:35 +0100887 -0.028892258f, -0.01048663f, -0.009793449f, -0.017093895f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000888 0.010987891f, 0.02357273f, -0.00010856845f, 0.0099760275f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100889 -0.001845119f, -0.03551521f, 0.0018358806f, 0.05763657f,
890 -0.01769146f, 0.040995963f, 0.02235177f, -0.060430344f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +0000891 0.11475477f, -0.023854522f, 0.10071741f, 0.0686208f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100892 -0.014250481f, 0.034261297f, 0.047418304f, 0.08562733f,
893 -0.030519066f, 0.0060542435f, 0.014653856f, -0.038836084f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000894 0.04096551f, 0.032249358f, -0.08355519f, -0.026823482f, // 18
895 0.056386515f, -0.010401743f, -0.028396193f, 0.08507674f,
896 0.014410365f, 0.020995233f, 0.17040324f, 0.11511526f,
897 0.02459721f, 0.0066619175f, 0.025853224f, -0.023133837f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100898 -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f, // 19
899 -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000900 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f,
901 -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f
902 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100903 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
904 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000905 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
906 std::vector<float> recurrentToCellWeightsValue
907 {
telsoa01ce3e84a2018-08-31 09:31:35 +0100908 -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000909 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
910 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100911 -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000912 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
913 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100914 -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
915 -0.019443132f, -0.030755889f, -0.0040000007f, 0.04465846f,
916 -0.021585021f, 0.0031670958f, 0.0053199246f, -0.056117613f,
917 -0.10893326f, 0.076739706f, -0.08509834f, -0.027997585f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000918 0.037871376f, 0.01449768f, -0.09002357f, -0.06111149f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100919 -0.046195522f, 0.0422062f, -0.005683705f, -0.1253618f,
920 -0.012925729f, -0.04890792f, 0.06985068f, 0.037654128f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000921 0.03398274f, -0.004781977f, 0.007032333f, -0.031787455f,
922 0.010868644f, -0.031489216f, 0.09525667f, 0.013939797f,
923 0.0058680447f, 0.0167067f, 0.02668468f, -0.04797466f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100924 -0.048885044f, -0.12722108f, 0.035304096f, 0.06554885f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000925 0.00972396f, -0.039238118f, -0.05159735f, -0.11329045f,
926 0.1613692f, -0.03750952f, 0.06529313f, -0.071974665f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100927 -0.11769596f, 0.015524369f, -0.0013754242f, -0.12446318f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000928 0.02786344f, -0.014179351f, 0.005264273f, 0.14376344f,
929 0.015983658f, 0.03406988f, -0.06939408f, 0.040699873f,
930 0.02111075f, 0.09669095f, 0.041345075f, -0.08316494f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100931 -0.07684199f, -0.045768797f, 0.032298047f, -0.041805092f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000932 0.0119405f, 0.0061010392f, 0.12652606f, 0.0064572375f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100933 -0.024950314f, 0.11574242f, 0.04508852f, -0.04335324f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000934 0.06760663f, -0.027437469f, 0.07216407f, 0.06977076f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100935 -0.05438599f, 0.034033038f, -0.028602652f, 0.05346137f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000936 0.043184172f, -0.037189785f, 0.10420091f, 0.00882477f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100937 -0.054019816f, -0.074273005f, -0.030617684f, -0.0028467078f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000938 0.024302477f, -0.0038869337f, 0.005332455f, 0.0013399826f,
939 0.04361412f, -0.007001822f, 0.09631092f, -0.06702025f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100940 -0.042049985f, -0.035070654f, -0.04103342f, -0.10273396f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000941 0.0544271f, 0.037184782f, -0.13150354f, -0.0058036847f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100942 -0.008264958f, 0.042035464f, 0.05891794f, 0.029673764f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000943 0.0063542654f, 0.044788733f, 0.054816857f, 0.062257513f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100944 -0.00093483756f, 0.048938446f, -0.004952862f, -0.007730018f,
945 -0.04043371f, -0.017094059f, 0.07229206f, -0.023670016f,
946 -0.052195564f, -0.025616996f, -0.01520939f, 0.045104615f,
947 -0.007376126f, 0.003533447f, 0.006570588f, 0.056037236f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000948 0.12436656f, 0.051817212f, 0.028532185f, -0.08686856f,
949 0.11868599f, 0.07663395f, -0.07323171f, 0.03463402f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100950 -0.050708205f, -0.04458982f, -0.11590894f, 0.021273347f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000951 0.1251325f, -0.15313013f, -0.12224372f, 0.17228661f,
952 0.023029093f, 0.086124025f, 0.006445803f, -0.03496501f,
953 0.028332196f, 0.04449512f, -0.042436164f, -0.026587414f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100954 -0.006041347f, -0.09292539f, -0.05678812f, 0.03897832f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000955 0.09465633f, 0.008115513f, -0.02171956f, 0.08304309f,
956 0.071401566f, 0.019622514f, 0.032163795f, -0.004167056f,
957 0.02295182f, 0.030739572f, 0.056506045f, 0.004612461f,
958 0.06524936f, 0.059999723f, 0.046395954f, -0.0045512207f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100959 -0.1335546f, -0.030136576f, 0.11584653f, -0.014678886f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000960 0.0020118146f, -0.09688814f, -0.0790206f, 0.039770417f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100961 -0.0329582f, 0.07922767f, 0.029322514f, 0.026405897f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000962 0.04207835f, -0.07073373f, 0.063781224f, 0.0859677f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100963 -0.10925287f, -0.07011058f, 0.048005477f, 0.03438226f,
964 -0.09606514f, -0.006669445f, -0.043381985f, 0.04240257f,
965 -0.06955775f, -0.06769346f, 0.043903265f, -0.026784198f,
966 -0.017840602f, 0.024307009f, -0.040079936f, -0.019946516f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000967 0.045318738f, -0.12233574f, 0.026170589f, 0.0074471775f,
968 0.15978073f, 0.10185836f, 0.10298046f, -0.015476589f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100969 -0.039390966f, -0.072174534f, 0.0739445f, -0.1211869f,
970 -0.0347889f, -0.07943156f, 0.014809798f, -0.12412325f,
971 -0.0030663363f, 0.039695457f, 0.0647603f, -0.08291318f,
972 -0.018529687f, -0.004423833f, 0.0037507233f, 0.084633216f,
973 -0.01514876f, -0.056505352f, -0.012800942f, -0.06994386f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000974 0.012962922f, -0.031234352f, 0.07029052f, 0.016418684f,
975 0.03618972f, 0.055686004f, -0.08663945f, -0.017404709f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100976 -0.054761406f, 0.029065743f, 0.052404847f, 0.020238016f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000977 0.0048197987f, -0.0214882f, 0.07078733f, 0.013016777f,
978 0.06262858f, 0.009184685f, 0.020785125f, -0.043904778f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100979 -0.0270329f, -0.03299152f, -0.060088247f, -0.015162964f,
980 -0.001828936f, 0.12642565f, -0.056757294f, 0.013586685f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000981 0.09232601f, -0.035886683f, 0.06000002f, 0.05229691f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100982 -0.052580316f, -0.082029596f, -0.010794592f, 0.012947712f,
983 -0.036429964f, -0.085508935f, -0.13127148f, -0.017744139f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000984 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100985 -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f,
986 -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000987 -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f
988 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100989 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
990 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000991 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
992 std::vector<float> recurrentToOutputWeightsValue
993 {
994 0.025825322f, -0.05813119f, 0.09495884f, -0.045984812f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100995 -0.01255415f, -0.0026479573f, -0.08196161f, -0.054914974f,
996 -0.0046604523f, -0.029587349f, -0.044576716f, -0.07480124f,
997 -0.082868785f, 0.023254942f, 0.027502948f, -0.0039728214f,
998 -0.08683098f, -0.08116779f, -0.014675607f, -0.037924774f,
999 -0.023314456f, -0.007401714f, -0.09255757f, 0.029460307f,
1000 -0.08829125f, -0.005139627f, -0.08989442f, -0.0555066f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001001 0.13596267f, -0.025062224f, -0.048351806f, -0.03850004f,
1002 0.07266485f, -0.022414139f, 0.05940088f, 0.075114764f,
1003 0.09597592f, -0.010211725f, -0.0049794707f, -0.011523867f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001004 -0.025980417f, 0.072999895f, 0.11091378f, -0.081685916f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001005 0.014416728f, 0.043229222f, 0.034178585f, -0.07530371f,
1006 0.035837382f, -0.085607f, -0.007721233f, -0.03287832f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001007 -0.043848954f, -0.06404588f, -0.06632928f, -0.073643476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001008 0.008214239f, -0.045984086f, 0.039764922f, 0.03474462f,
1009 0.060612556f, -0.080590084f, 0.049127717f, 0.04151091f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001010 -0.030063879f, 0.008801774f, -0.023021035f, -0.019558564f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001011 0.05158114f, -0.010947698f, -0.011825728f, 0.0075720972f,
1012 0.0699727f, -0.0039981045f, 0.069350146f, 0.08799282f,
1013 0.016156472f, 0.035502106f, 0.11695009f, 0.006217345f,
1014 0.13392477f, -0.037875112f, 0.025745004f, 0.08940699f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001015 -0.00924166f, 0.0046702605f, -0.036598757f, -0.08811812f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001016 0.10522024f, -0.032441203f, 0.008176899f, -0.04454919f,
1017 0.07058152f, 0.0067963637f, 0.039206743f, 0.03259838f,
1018 0.03725492f, -0.09515802f, 0.013326398f, -0.052055415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001019 -0.025676316f, 0.03198509f, -0.015951829f, -0.058556724f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001020 0.036879618f, 0.043357447f, 0.028362012f, -0.05908629f,
1021 0.0059240665f, -0.04995891f, -0.019187413f, 0.0276265f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001022 -0.01628143f, 0.0025863599f, 0.08800015f, 0.035250366f,
1023 -0.022165963f, -0.07328642f, -0.009415526f, -0.07455109f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001024 0.11690406f, 0.0363299f, 0.07411125f, 0.042103454f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001025 -0.009660886f, 0.019076364f, 0.018299393f, -0.046004917f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001026 0.08891175f, 0.0431396f, -0.026327137f, -0.051502608f,
1027 0.08979574f, -0.051670972f, 0.04940282f, -0.07491107f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001028 -0.021240504f, 0.022596184f, -0.034280192f, 0.060163025f,
1029 -0.058211457f, -0.051837247f, -0.01349775f, -0.04639988f,
1030 -0.035936575f, -0.011681591f, 0.064818054f, 0.0073146066f,
1031 -0.021745546f, -0.043124277f, -0.06471268f, -0.07053354f,
1032 -0.029321948f, -0.05330136f, 0.016933719f, -0.053782392f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001033 0.13747959f, -0.1361751f, -0.11569455f, 0.0033329215f,
1034 0.05693899f, -0.053219706f, 0.063698f, 0.07977434f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001035 -0.07924483f, 0.06936997f, 0.0034815092f, -0.007305279f,
1036 -0.037325785f, -0.07251102f, -0.033633437f, -0.08677009f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001037 0.091591336f, -0.14165086f, 0.021752775f, 0.019683983f,
1038 0.0011612234f, -0.058154266f, 0.049996935f, 0.0288841f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001039 -0.0024567875f, -0.14345716f, 0.010955264f, -0.10234828f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001040 0.1183656f, -0.0010731248f, -0.023590032f, -0.072285876f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001041 -0.0724771f, -0.026382286f, -0.0014920527f, 0.042667855f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001042 0.0018776858f, 0.02986552f, 0.009814309f, 0.0733756f,
1043 0.12289186f, 0.018043943f, -0.0458958f, 0.049412545f,
1044 0.033632483f, 0.05495232f, 0.036686596f, -0.013781798f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001045 -0.010036754f, 0.02576849f, -0.08307328f, 0.010112348f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001046 0.042521734f, -0.05869831f, -0.071689695f, 0.03876447f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001047 -0.13275425f, -0.0352966f, -0.023077697f, 0.10285965f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001048 0.084736146f, 0.15568255f, -0.00040734606f, 0.027835453f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001049 -0.10292561f, -0.032401145f, 0.10053256f, -0.026142767f,
1050 -0.08271222f, -0.0030240538f, -0.016368777f, 0.1070414f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001051 0.042672627f, 0.013456989f, -0.0437609f, -0.022309763f,
1052 0.11576483f, 0.04108048f, 0.061026827f, -0.0190714f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001053 -0.0869359f, 0.037901703f, 0.0610107f, 0.07202949f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001054 0.01675338f, 0.086139716f, -0.08795751f, -0.014898893f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001055 -0.023771819f, -0.01965048f, 0.007955471f, -0.043740474f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001056 0.03346837f, -0.10549954f, 0.090567775f, 0.042013682f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001057 -0.03176985f, 0.12569028f, -0.02421228f, -0.029526481f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001058 0.023851605f, 0.031539805f, 0.05292009f, -0.02344001f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001059 -0.07811758f, -0.08834428f, 0.10094801f, 0.16594367f,
1060 -0.06861939f, -0.021256343f, -0.041093912f, -0.06669611f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001061 0.035498552f, 0.021757556f, -0.09302526f, -0.015403468f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001062 -0.06614931f, -0.051798206f, -0.013874718f, 0.03630673f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001063 0.010412845f, -0.08077351f, 0.046185967f, 0.0035662893f,
1064 0.03541868f, -0.094149634f, -0.034814864f, 0.003128424f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001065 -0.020674974f, -0.03944324f, -0.008110165f, -0.11113267f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001066 0.08484226f, 0.043586485f, 0.040582247f, 0.0968012f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001067 -0.065249965f, -0.028036479f, 0.0050708856f, 0.0017462453f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001068 0.0326779f, 0.041296225f, 0.09164146f, -0.047743853f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001069 -0.015952192f, -0.034451712f, 0.084197424f, -0.05347844f,
1070 -0.11768019f, 0.085926116f, -0.08251791f, -0.045081906f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001071 0.0948852f, 0.068401024f, 0.024856757f, 0.06978981f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001072 -0.057309967f, -0.012775832f, -0.0032452994f, 0.01977615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001073 -0.041040014f, -0.024264973f, 0.063464895f, 0.05431621f
1074 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001075 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001076 hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
1077 std::vector<float> cellToInputWeightsValue
1078 {
1079 0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
1080 -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
1081 -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f, -0.052169047f,
1082 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f
1083 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001084 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001085 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1086 std::vector<float> cellToForgetWeightsValue
1087 {
1088 -0.01998659f, -0.15568835f, -0.24248174f, -0.012770197f, 0.041331276f,
1089 -0.072311886f, -0.052123554f, -0.0066330447f, -0.043891653f, 0.036225766f,
1090 -0.047248036f, 0.021479502f, 0.033189066f, 0.11952997f, -0.020432774f,
1091 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f
1092 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001093 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001094 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1095 std::vector<float> cellToOutputWeightsValue
1096 {
1097 0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
1098 -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
1099 -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
1100 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f
1101 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001102 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001103 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
1104 std::vector<float> inputGateBiasValue
1105 {
1106 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
1107 -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
1108 -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
1109 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f
1110 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001111 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001112 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1113 std::vector<float> forgetGateBiasValue
1114 {
1115 0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f, 0.11098921f,
1116 0.15378423f, 0.09263801f, 0.09790885f, 0.09508917f, 0.061199076f,
1117 0.07665568f, -0.015443159f, -0.03499149f, 0.046190713f, 0.08895977f,
1118 0.10899629f, 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f
1119 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001120 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001121 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1122 std::vector<float> cellBiasValue
1123 {
1124 -0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f, -0.1483596f,
1125 -0.10639995f, -0.091433935f, 0.058573797f, -0.06809782f, -0.07889636f,
1126 -0.043246906f, -0.09829136f, -0.4279842f, 0.034901652f, 0.18797937f,
1127 0.0075234566f, 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f
1128 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001129 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001130 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1131 std::vector<float> outputGateBiasValue
1132 {
1133 0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
1134 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
1135 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
1136 -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f
1137 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001138 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1139 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001140 hidl_vec<uint32_t> projectionWeightsDimensions{outputSize, numUnits};
1141 std::vector<float> projectionWeightsValue
1142 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001143 -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001144 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001145 -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
1146 -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001147 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
1148 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
1149 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
1150 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001151 -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
1152 -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
1153 -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001154 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
1155 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
1156 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
1157 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
1158 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001159 -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001160 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001161 -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001162 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001163 -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
1164 -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001165 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001166 -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001167 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001168 -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
1169 -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001170 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001171 -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
1172 -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
1173 -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001174 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
1175 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001176 -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001177 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
1178 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
1179 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
1180 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
1181 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001182 -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
1183 -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001184 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001185 -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
1186 -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001187 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
1188 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
1189 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001190 -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
1191 -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
1192 -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001193 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001194 -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001195 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
1196 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001197 -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
1198 -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
1199 -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001200 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001201 -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
1202 -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
1203 -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001204 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
1205 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
1206 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f
1207 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001208 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001209 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
1210 std::vector<float> projectionBiasValue(outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001211
1212 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001213 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1214 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001215 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001216 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1217 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001218
Matteo Martincighc7434122018-11-14 12:27:04 +00001219 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +01001220 // 20: The activation function: A value indicating the activation function:
1221 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +00001222 hidl_vec<uint32_t> activationFunctionDimensions{};
1223 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +01001224 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1225 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001226 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1227 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001228 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1229 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001230 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1231 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001232
1233 // Outputs:
1234 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1235 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +00001236 // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
1237 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1238 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1239 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1240 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
1241 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001242 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001243 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1244 std::vector<float> outputStateOutValue
1245 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001246 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835577f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001247 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415119f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001248 -0.013869f, 0.0287268f, -0.00334694f, 0.00733397f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001249 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.0216801f
1250 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001251 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001252 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1253 std::vector<float> cellStateOutValue
1254 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001255 -0.0531632f, -0.0118138f, 0.0870833f, 0.0347929f, -0.076144f,
1256 -0.0659219f, -0.0463811f, 0.0141307f, -0.0127706f, -0.03782f,
1257 -0.00402401f, -0.00571876f, -0.187957f, -0.0247127f, 0.0711425f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001258 0.008244f, 0.0492649f, 0.126972f, 0.0933097f, 0.29848f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001259 -0.0966178f, -0.114417f, 0.0387229f, 0.0453255f, -0.181286f,
1260 -0.0651251f, -0.0996879f, -0.00276995f, 0.0617558f, -0.0100728f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001261 0.056304f, -0.077416f, -0.162858f, -0.0541251f, 0.0571202f,
1262 -0.0525331f, 0.0724297f, 0.171029f, 0.141738f, 0.295483f
1263 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001264 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1265 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +00001266 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1267 std::vector<float> outputValue
1268 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001269 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001270 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001271 -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001272 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f
1273 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001274
1275 LstmTestImpl(inputDimensions, inputValue,
1276 inputToInputWeightsDimensions, inputToInputWeightsValue,
1277 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1278 inputToCellWeightsDimensions, inputToCellWeightsValue,
1279 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1280 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1281 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1282 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1283 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1284 cellToInputWeightsDimensions, cellToInputWeightsValue,
1285 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1286 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1287 inputGateBiasDimensions, inputGateBiasValue,
1288 forgetGateBiasDimensions, forgetGateBiasValue,
1289 cellBiasDimensions, cellBiasValue,
1290 outputGateBiasDimensions, outputGateBiasValue,
1291 projectionWeightsDimensions, projectionWeightsValue,
1292 projectionBiasDimensions, projectionBiasValue,
1293 outputStateInDimensions, outputStateInValue,
1294 cellStateInDimensions, cellStateInValue,
1295 activationFunctionDimensions, activationFunctionValue,
1296 cellClippingThresholdDimensions, cellClippingThresholdValue,
1297 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1298 scratchBufferDimensions, scratchBufferValue,
1299 outputStateOutDimensions, outputStateOutValue,
1300 cellStateOutDimensions, cellStateOutValue,
Matteo Martincighc7434122018-11-14 12:27:04 +00001301 outputDimensions, outputValue,
1302 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +01001303}
1304
// LSTM test variant: CIFG (coupled input-forget gate) enabled, peephole connections
// enabled, no projection layer, batch size 2. CIFG means the input-gate tensors
// (input-to-input / recurrent-to-input weights, cell-to-input weights, input gate bias)
// are all omitted (dim {0}); peephole means the cell-to-forget / cell-to-output weights
// are supplied; no projection means the projection weights/bias are omitted.
// 'compute' selects the ArmNN backend the model is compiled and executed on.
void LstmCifgPeepholeNoProjectionBatch2(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
    // The batch size has been increased to 2 (it was 1 in the VTS test) with appropriate input and output values added.

    uint32_t batchSize = 2;
    uint32_t inputSize = 2;
    uint32_t numUnits = 4;
    uint32_t outputSize = numUnits; // no projection layer, so output_size == num_units

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{2.0f, 3.0f, 3.0f, 4.0f};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    //     Omitted (dim {0}) because this is a CIFG model.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
    std::vector<float> inputToInputWeightsValue;
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
                                                  0.13056988f, -0.36333650f,
                                                 -0.22755712f,  0.28253698f,
                                                  0.24407166f,  0.33826375f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
                                               -0.09624726f,  0.05100781f,
                                                0.04717243f,  0.48944736f,
                                               -0.38535351f, -0.17212132f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
                                                 -0.55932593f, -0.09426838f,
                                                 -0.44257352f,  0.54939759f,
                                                  0.01533556f,  0.42751634f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    //     Omitted (dim {0}) because this is a CIFG model.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
    std::vector<float> recurrentToInputWeightsValue;
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
                                                     -0.14340827f,  0.36986142f,  0.23414481f,  0.55899000f,
                                                      0.10798943f, -0.41174671f,  0.17751795f, -0.34484994f,
                                                     -0.35874045f, -0.11352962f,  0.27268326f,  0.54058349f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
                                                    0.42957711f,  0.01841056f, -0.32764608f, -0.33027974f,
                                                   -0.10826075f,  0.20675004f,  0.19069612f, -0.03026325f,
                                                   -0.54532051f,  0.33003211f,  0.44901288f,  0.21193194f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
                                                     0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
                                                     0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
                                                     0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Omitted (dim {0}) because this is a CIFG model (no input gate to peep into).
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Present: this model uses peephole connections.
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
    std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Present: this model uses peephole connections.
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
    std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Omitted (dim {0}) because this is a CIFG model.
    hidl_vec<uint32_t> inputGateBiasDimensions{0}; // VTS was {4} -> {0} ?
    std::vector<float> inputGateBiasValue;
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue{1.0f, 1.0f, 1.0f, 1.0f};
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue(numUnits, 0.0f);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue(numUnits, 0.0f);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units]. Omitted (dim {0}): no projection layer in this model.
    hidl_vec<uint32_t> projectionWeightsDimensions{0};
    std::vector<float> projectionWeightsValue;
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    //     Omitted (dim {0}): no projection layer in this model.
    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<float> projectionBiasValue;

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    //     Zero-initialised: the recurrence starts from a cleared state.
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    //     Zero-initialised: the recurrence starts from a cleared state.
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4}; // Tanh
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f};

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    //    HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
    //    Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //              android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //              tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    //    Hence numUnits * 3 here, since this IS a CIFG model.
    hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
    std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<float> outputStateOutValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
                                           -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<float> cellStateOutValue{-0.76044439f, -0.01804161f, 0.18226376f, -0.06493707f,
                                         -0.90477051f, -0.04355603f, 0.18475688f, -0.04158677f};
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
                                   -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};

    // Build the model, run it on the requested backend and compare against the
    // expected output tensors above (argument order follows the NNAPI operand order).
    LstmTestImpl(inputDimensions, inputValue,
                 inputToInputWeightsDimensions, inputToInputWeightsValue,
                 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                 inputToCellWeightsDimensions, inputToCellWeightsValue,
                 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                 cellToInputWeightsDimensions, cellToInputWeightsValue,
                 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                 inputGateBiasDimensions, inputGateBiasValue,
                 forgetGateBiasDimensions, forgetGateBiasValue,
                 cellBiasDimensions, cellBiasValue,
                 outputGateBiasDimensions, outputGateBiasValue,
                 projectionWeightsDimensions, projectionWeightsValue,
                 projectionBiasDimensions, projectionBiasValue,
                 outputStateInDimensions, outputStateInValue,
                 cellStateInDimensions, cellStateInValue,
                 activationFunctionDimensions, activationFunctionValue,
                 cellClippingThresholdDimensions, cellClippingThresholdValue,
                 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
                 scratchBufferDimensions, scratchBufferValue,
                 outputStateOutDimensions, outputStateOutValue,
                 cellStateOutDimensions, cellStateOutValue,
                 outputDimensions, outputValue,
                 compute);
}
1475
1476static const boost::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
1477
// Data-driven test case: 'sample' is the armnn::Compute backend supplied by
// BOOST_DATA_TEST_CASE from COMPUTE_DEVICES (one run per backend).
BOOST_DATA_TEST_CASE(LstmNoCifgNoPeepholeNoProjectionTest, COMPUTE_DEVICES)
{
    LstmNoCifgNoPeepholeNoProjection(sample);
}
1482
// Data-driven test case: 'sample' is the armnn::Compute backend supplied by
// BOOST_DATA_TEST_CASE from COMPUTE_DEVICES (one run per backend).
BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionTest, COMPUTE_DEVICES)
{
    LstmCifgPeepholeNoProjection(sample);
}
1487
// Data-driven test case: 'sample' is the armnn::Compute backend supplied by
// BOOST_DATA_TEST_CASE from COMPUTE_DEVICES (one run per backend).
BOOST_DATA_TEST_CASE(LstmNoCifgPeepholeProjectionTest, COMPUTE_DEVICES)
{
    LstmNoCifgPeepholeProjection(sample);
}
1492
// Data-driven test case: 'sample' is the armnn::Compute backend supplied by
// BOOST_DATA_TEST_CASE from COMPUTE_DEVICES (one run per backend).
BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionBatch2Test, COMPUTE_DEVICES)
{
    LstmCifgPeepholeNoProjectionBatch2(sample);
}
1497
1498BOOST_AUTO_TEST_SUITE_END()