blob: 3d9bf77fb656f613019cf69ce8abf9bfc1a79325 [file] [log] [blame]
telsoa01ce3e84a2018-08-31 09:31:35 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beck93e48982018-09-05 13:05:09 +01003// SPDX-License-Identifier: MIT
telsoa01ce3e84a2018-08-31 09:31:35 +01004//
Matteo Martincighc7434122018-11-14 12:27:04 +00005
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01006#pragma once
7
8#include "DriverTestHelpers.hpp"
Aron Virginas-Tar44cfd842019-06-14 15:45:03 +01009
Matteo Martincighc7434122018-11-14 12:27:04 +000010#include <boost/array.hpp>
telsoa01ce3e84a2018-08-31 09:31:35 +010011#include <boost/math/special_functions/relative_difference.hpp>
telsoa01ce3e84a2018-08-31 09:31:35 +010012
Aron Virginas-Tar44cfd842019-06-14 15:45:03 +010013using ArmnnDriver = armnn_driver::ArmnnDriver;
telsoa01ce3e84a2018-08-31 09:31:35 +010014using DriverOptions = armnn_driver::DriverOptions;
Aron Virginas-Tar44cfd842019-06-14 15:45:03 +010015
telsoa01ce3e84a2018-08-31 09:31:35 +010016using namespace driverTestHelpers;
17using namespace android::hardware;
18
19namespace
20{
21
22template<typename T>
Matteo Martincighc7434122018-11-14 12:27:04 +000023RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
telsoa01ce3e84a2018-08-31 09:31:35 +010024{
25 DataLocation inputInloc = {};
26 inputInloc.poolIndex = poolIndex;
27 inputInloc.offset = 0;
28 inputInloc.length = value.size() * sizeof(T);
29 RequestArgument inputRequestArgument = {};
30 inputRequestArgument.location = inputInloc;
31 inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
32 return inputRequestArgument;
33}
34
35// Returns true if the relative difference between two float values is less than the tolerance value given.
36// This is used because the floating point comparison tolerance (set on each BOOST_AUTO_TEST_CASE) does not work!
37bool TolerantCompareEqual(float a, float b, float tolerance = 0.00001f)
38{
39 float rd;
40 if (a == 0.0f)
41 {
42 rd = fabs(b);
43 }
44 else if (b == 0.0f)
45 {
46 rd = fabs(a);
47 }
48 else
49 {
50 rd = boost::math::relative_difference(a, b);
51 }
52 return rd < tolerance;
53}
54
Kevin Mayf29a2c52019-03-14 11:56:32 +000055// Helper function to create an OperandLifeTime::NO_VALUE for testing.
56// To be used on optional input operands that have no values - these are valid and should be tested.
57OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
58{
59 // Only create a NO_VALUE for optional operands that have no elements
60 if (dimensions.size() == 0 || dimensions[0] == 0)
61 {
62 return OperandLifeTime::NO_VALUE;
63 }
64 return OperandLifeTime::CONSTANT_COPY;
65}
Ferran Balaguerb2397fd2019-07-25 12:12:39 +010066
67template<typename HalModel>
68void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, const Request& request)
69{
70 android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, driver);
71 if (preparedModel.get() != nullptr)
72 {
73 Execute(preparedModel, request);
74 }
75}
76
#ifdef ARMNN_ANDROID_NN_V1_2

// Specialization for HAL 1.2 models: these must go through the 1.2
// preparation path (PrepareModel_1_2) to obtain a V1_2::IPreparedModel,
// rather than the V1_0 path used by the primary template above.
template<>
void ExecuteModel<armnn_driver::hal_1_2::HalPolicy::Model>(const armnn_driver::hal_1_2::HalPolicy::Model& model,
                                                           armnn_driver::ArmnnDriver& driver,
                                                           const Request& request)
{
    android::sp<V1_2::IPreparedModel> preparedModel = PrepareModel_1_2(model, driver);
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }
}

#endif
92
Matteo Martincighc7434122018-11-14 12:27:04 +000093} // anonymous namespace
telsoa01ce3e84a2018-08-31 09:31:35 +010094
// The set of backends each LSTM test is run against: always the CPU reference
// backend, plus the GPU (CL) backend when the driver was built with
// Arm Compute Library CL support.
#ifndef ARMCOMPUTECL_ENABLED
static const boost::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
#else
static const boost::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
#endif
telsoa01ce3e84a2018-08-31 09:31:35 +0100100
// Add our own tests here since we fail the lstm tests which Google supplies (because of non-const weights)
//
// Builds a one-operation LSTM model from the supplied operand dimensions and
// values, runs it on the given compute device, and checks the produced
// outputs against the expected values with a tolerant float compare.
// Operand ordering follows the NNAPI LSTM specification: inputs 0-22, with
// optional layer-normalization weights appended as inputs 23-26 when any of
// them is non-empty, followed by the four outputs (scratch buffer, output
// state, cell state, output). Optional operands are passed with empty
// dimensions/values and become NO_VALUE operands (see CreateNoValueLifeTime).
template <typename HalPolicy>
void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                  const std::vector<float>& inputValue,
                  const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                  const std::vector<float>& inputToInputWeightsValue,
                  const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                  const std::vector<float>& inputToForgetWeightsValue,
                  const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                  const std::vector<float>& inputToCellWeightsValue,
                  const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                  const std::vector<float>& inputToOutputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                  const std::vector<float>& recurrentToInputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                  const std::vector<float>& recurrentToForgetWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                  const std::vector<float>& recurrentToCellWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                  const std::vector<float>& recurrentToOutputWeightsValue,
                  const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
                  const std::vector<float>& cellToInputWeightsValue,
                  const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
                  const std::vector<float>& cellToForgetWeightsValue,
                  const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
                  const std::vector<float>& cellToOutputWeightsValue,
                  const hidl_vec<uint32_t>& inputGateBiasDimensions,
                  const std::vector<float>& inputGateBiasValue,
                  const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                  const std::vector<float>& forgetGateBiasValue,
                  const hidl_vec<uint32_t>& cellBiasDimensions,
                  const std::vector<float>& cellBiasValue,
                  const hidl_vec<uint32_t>& outputGateBiasDimensions,
                  const std::vector<float>& outputGateBiasValue,
                  const hidl_vec<uint32_t>& projectionWeightsDimensions,
                  const std::vector<float>& projectionWeightsValue,
                  const hidl_vec<uint32_t>& projectionBiasDimensions,
                  const std::vector<float>& projectionBiasValue,
                  const hidl_vec<uint32_t>& outputStateInDimensions,
                  const std::vector<float>& outputStateInValue,
                  const hidl_vec<uint32_t>& cellStateInDimensions,
                  const std::vector<float>& cellStateInValue,
                  const hidl_vec<uint32_t>& activationFunctionDimensions,
                  const std::vector<int32_t>& activationFunctionValue,
                  const hidl_vec<uint32_t>& cellClippingThresholdDimensions,
                  const std::vector<float>& cellClippingThresholdValue,
                  const hidl_vec<uint32_t>& projectionClippingThresholdDimensions,
                  const std::vector<float>& projectionClippingThresholdValue,
                  const hidl_vec<uint32_t>& inputLayerNormWeightsDimensions,
                  const std::vector<float>& inputLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& forgetLayerNormWeightsDimensions,
                  const std::vector<float>& forgetLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& cellLayerNormWeightsDimensions,
                  const std::vector<float>& cellLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& outputLayerNormWeightsDimensions,
                  const std::vector<float>& outputLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& scratchBufferDimensions,
                  const std::vector<float>& scratchBufferValue,
                  const hidl_vec<uint32_t>& outputStateOutDimensions,
                  const std::vector<float>& outputStateOutValue,
                  const hidl_vec<uint32_t>& cellStateOutDimensions,
                  const std::vector<float>& cellStateOutValue,
                  const hidl_vec<uint32_t>& outputDimensions,
                  const std::vector<float>& outputValue,
                  armnn::Compute compute)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
    using Model = typename HalPolicy::Model;
    Model model = {};

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    AddInputOperand<HalPolicy>(model, inputDimensions);

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    AddTensorOperand<HalPolicy>(model,
                                inputToInputWeightsDimensions,
                                inputToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputToInputWeightsDimensions));
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToInputWeightsDimensions,
                                recurrentToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToInputWeightsDimensions,
                                cellToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToInputWeightsDimensions));
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToForgetWeightsDimensions,
                                cellToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToForgetWeightsDimensions));
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToOutputWeightsDimensions,
                                cellToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToOutputWeightsDimensions));
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                inputGateBiasDimensions,
                                inputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputGateBiasDimensions));
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, forgetGateBiasDimensions, forgetGateBiasValue);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, cellBiasDimensions, cellBiasValue);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, outputGateBiasDimensions, outputGateBiasValue);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    AddTensorOperand<HalPolicy>(model,
                                projectionWeightsDimensions,
                                projectionWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionWeightsDimensions));
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    AddTensorOperand<HalPolicy>(model,
                                projectionBiasDimensions,
                                projectionBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionBiasDimensions));

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddInputOperand<HalPolicy>(model, outputStateInDimensions);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddInputOperand<HalPolicy>(model, cellStateInDimensions);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    AddTensorOperand<HalPolicy>(model,
                                activationFunctionDimensions,
                                activationFunctionValue,
                                HalPolicy::OperandType::INT32);
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                cellClippingThresholdDimensions,
                                cellClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                projectionClippingThresholdDimensions,
                                projectionClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);

    bool normalizationEnabled = false;

    // If any of the tensors have a value all normalization tensors are set
    if (!inputLayerNormWeightsValue.empty() ||
        !forgetLayerNormWeightsValue.empty() ||
        !cellLayerNormWeightsValue.empty() ||
        !outputLayerNormWeightsValue.empty())
    {
        // Normalization:
        // 23:The input layer normalization weights. A 1-D tensor of shape [num_units].
        //    Used to rescale normalized inputs to activation at input gate.
        AddTensorOperand<HalPolicy>(model,
                                    inputLayerNormWeightsDimensions,
                                    inputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(inputLayerNormWeightsDimensions));
        // 24:The forget layer normalization weights. A 1-D tensor of shape [num_units].
        //    Used to rescale normalized inputs to activation at forget gate.
        AddTensorOperand<HalPolicy>(model,
                                    forgetLayerNormWeightsDimensions,
                                    forgetLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(forgetLayerNormWeightsDimensions));
        // 25:The cell layer normalization weights. A 1-D tensor of shape [num_units].
        //    Used to rescale normalized inputs to activation at cell gate.
        AddTensorOperand<HalPolicy>(model,
                                    cellLayerNormWeightsDimensions,
                                    cellLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(cellLayerNormWeightsDimensions));
        // 26:The output layer normalization weights. A 1-D tensor of shape [num_units].
        //    Used to rescale normalized inputs to activation at output gate.
        AddTensorOperand<HalPolicy>(model,
                                    outputLayerNormWeightsDimensions,
                                    outputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(outputLayerNormWeightsDimensions));

        normalizationEnabled = true;
    }

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    AddOutputOperand<HalPolicy>(model, scratchBufferDimensions);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddOutputOperand<HalPolicy>(model, outputStateOutDimensions);
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddOutputOperand<HalPolicy>(model, cellStateOutDimensions);
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    AddOutputOperand<HalPolicy>(model, outputDimensions);

    // make the lstm operation
    model.operations.resize(1);
    model.operations[0].type = HalPolicy::OperationType::LSTM;

    // The operand indices of the operation depend on whether the four optional
    // layer-normalization operands (23-26) were added above; the outputs always
    // immediately follow the last input.
    if (normalizationEnabled)
    {
        model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
                                                         14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26};
        model.operations[0].outputs = hidl_vec<uint32_t> {27, 28, 29, 30};
    }
    else
    {
        model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                                                         12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
        model.operations[0].outputs = hidl_vec<uint32_t> {23, 24, 25, 26};
    }

    // define the input values
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    inputArguments[0] = CreateRequestArgument<float>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<float>(outputStateInValue, 1);
    inputArguments[2] = CreateRequestArgument<float>(cellStateInValue, 2);

    // define the expected output values
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(4);

    outputArguments[0] = CreateRequestArgument<float>(scratchBufferValue, 3);
    outputArguments[1] = CreateRequestArgument<float>(outputStateOutValue, 4);
    outputArguments[2] = CreateRequestArgument<float>(cellStateOutValue, 5);
    outputArguments[3] = CreateRequestArgument<float>(outputValue, 6);

    Request request = {};
    request.inputs = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(outputStateInValue.size(), request, outputStateInValue.data());
    AddPoolAndSetData(cellStateInValue.size(), request, cellStateInValue.data());

    // add memory for the outputs
    // Note: the scratch buffer (pool 3) is allocated but never checked below -
    // its contents are an implementation detail of the LSTM kernel.
    AddPoolAndGetData(scratchBufferValue.size(), request);
    android::sp<IMemory> outputStateOutMemory = AddPoolAndGetData(outputStateOutValue.size(), request);
    float* outputStateOutData = static_cast<float*>(static_cast<void*>(outputStateOutMemory->getPointer()));
    android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData(cellStateOutValue.size(), request);
    float* cellStateOutData = static_cast<float*>(static_cast<void*>(cellStateOutMemory->getPointer()));
    android::sp<IMemory> outputMemory = AddPoolAndGetData(outputValue.size(), request);
    float* outputData = static_cast<float*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    ExecuteModel(model, *driver, request);

    // check the results
    for (size_t i = 0; i < outputStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
                   "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
    }
    for (size_t i = 0; i < cellStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]),
                   "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
    }
    for (size_t i = 0; i < outputValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
    }
}
403
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100404template <typename HalPolicy>
Matteo Martincighc7434122018-11-14 12:27:04 +0000405void LstmNoCifgNoPeepholeNoProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +0100406{
407 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm.model.cpp
408 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm.example.cpp
409 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
410
Matteo Martincighc7434122018-11-14 12:27:04 +0000411 uint32_t batchSize = 1;
412 uint32_t inputSize = 2;
413 uint32_t numUnits = 4;
414 uint32_t outputSize = numUnits;
415
telsoa01ce3e84a2018-08-31 09:31:35 +0100416 // Inputs:
417 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
418 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +0000419 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
420 std::vector<float> inputValue{2.0f, 3.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100421
422 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
423 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +0000424 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
425 std::vector<float> inputToInputWeightsValue{-0.45018822f, -0.02338299f,
426 -0.08705890f, -0.34550029f,
427 0.04266912f, -0.15680569f,
428 -0.34856534f, 0.43890524f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100429 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
430 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000431 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
432 std::vector<float> inputToForgetWeightsValue{ 0.09701663f, 0.20334584f,
433 -0.50592935f, -0.31343272f,
434 -0.40032279f, 0.44781327f,
435 0.01387155f, -0.35593212f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100436 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000437 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
438 std::vector<float> inputToCellWeightsValue{-0.50013041f, 0.13702840f,
439 0.11810488f, 0.20131630f,
440 -0.20583314f, 0.44344562f,
441 0.22077113f, -0.29909778f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100442 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
443 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000444 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
445 std::vector<float> inputToOutputWeightsValue{-0.25065863f, -0.28290087f,
446 0.04613829f, 0.40525138f,
447 0.44272184f, 0.03897077f,
448 -0.15568960f, 0.19487578f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100449 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
450 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
451 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +0000452 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
453 std::vector<float> recurrentToInputWeightsValue{-0.00635350f, -0.20423880f, 0.31454784f, -0.35746509f,
454 0.28902304f, 0.08183324f, -0.16555229f, 0.02286911f,
455 -0.13566875f, 0.03034258f, 0.48091322f, -0.12528998f,
456 0.24077177f, -0.51332325f, -0.33502164f, 0.10629296f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100457 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
458 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000459 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
460 std::vector<float> recurrentToForgetWeightsValue{-0.48684245f, -0.06655136f, 0.42224967f, 0.21126390f,
461 0.27654213f, 0.20864892f, -0.07646349f, 0.45877004f,
462 0.00141793f, -0.14609534f, 0.36447752f, 0.09196436f,
463 0.28053468f, 0.01560611f, -0.20127171f, -0.01140004f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100464 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
465 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000466 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
467 std::vector<float> recurrentToCellWeightsValue{-0.34074140f, 0.24443203f, -0.20785320f, 0.26320225f,
468 0.05695659f, -0.00123841f, -0.47447860f, -0.35869038f,
469 -0.06418842f, -0.13502428f, -0.50176400f, 0.22830659f,
470 -0.46367589f, 0.26016325f, -0.03894562f, -0.16368064f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100471 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
472 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000473 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
474 std::vector<float> recurrentToOutputWeightsValue{ 0.43385774f, -0.17194885f, 0.27182370f, 0.09215671f,
475 0.24107647f, -0.39835793f, 0.18212086f, 0.01301402f,
476 0.48572797f, -0.50656658f, 0.20047462f, -0.20607421f,
477 -0.51818722f, -0.15390486f, 0.04681480f, 0.39922136f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100478 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000479 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
480 std::vector<float> cellToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100481 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000482 hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
483 std::vector<float> cellToForgetWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100484 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000485 hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
486 std::vector<float> cellToOutputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100487 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000488 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
489 std::vector<float> inputGateBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100490 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000491 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
492 std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100493 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000494 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
495 std::vector<float> cellBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100496 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000497 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
498 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100499 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
500 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000501 hidl_vec<uint32_t> projectionWeightsDimensions{0};
502 std::vector<float> projectionWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100503 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000504 hidl_vec<uint32_t> projectionBiasDimensions{0};
505 std::vector<float> projectionBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100506
507 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000508 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
509 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100510 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000511 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
512 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100513
Matteo Martincighc7434122018-11-14 12:27:04 +0000514 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +0100515 // 20: The activation function: A value indicating the activation function:
516 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +0000517 hidl_vec<uint32_t> activationFunctionDimensions{};
518 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +0100519 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
520 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000521 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
522 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100523 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
524 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000525 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
526 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100527
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100528 // Normalization:
529 // 23:The input layer normalization weights. A 1-D tensor of shape [num_units].
530 // Used to rescale normalized inputs to activation at input gate.
531 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
532 std::vector<float> inputLayerNormWeightsValue;
533 // 24:The forget layer normalization weights. A 1-D tensor of shape [num_units].
534 // Used to rescale normalized inputs to activation at forget gate.
535 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
536 std::vector<float> forgetLayerNormWeightsValue;
537 // 25:The cell layer normalization weights. A 1-D tensor of shape [num_units].
538 // Used to rescale normalized inputs to activation at cell gate.
539 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
540 std::vector<float> cellLayerNormWeightsValue;
541 // 26:The output layer normalization weights. A 1-D tensor of shape [num_units].
542 // Used to rescale normalized inputs to activation at output gate.
543 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
544 std::vector<float> outputLayerNormWeightsValue;
545
telsoa01ce3e84a2018-08-31 09:31:35 +0100546 // Outputs:
547 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
548 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +0000549 // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
550 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
551 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
552 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
553 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
554 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100555 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000556 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
telsoa01ce3e84a2018-08-31 09:31:35 +0100557 std::vector<float> outputStateOutValue {-0.0297319f, 0.122947f, 0.208851f, -0.153588f};
558 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000559 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
telsoa01ce3e84a2018-08-31 09:31:35 +0100560 std::vector<float> cellStateOutValue {-0.145439f, 0.157475f, 0.293663f, -0.277353f};
561 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
562 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +0000563 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
telsoa01ce3e84a2018-08-31 09:31:35 +0100564 std::vector<float> outputValue {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f};
565
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100566 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
567 inputToInputWeightsDimensions, inputToInputWeightsValue,
568 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
569 inputToCellWeightsDimensions, inputToCellWeightsValue,
570 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
571 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
572 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
573 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
574 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
575 cellToInputWeightsDimensions, cellToInputWeightsValue,
576 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
577 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
578 inputGateBiasDimensions, inputGateBiasValue,
579 forgetGateBiasDimensions, forgetGateBiasValue,
580 cellBiasDimensions, cellBiasValue,
581 outputGateBiasDimensions, outputGateBiasValue,
582 projectionWeightsDimensions, projectionWeightsValue,
583 projectionBiasDimensions, projectionBiasValue,
584 outputStateInDimensions, outputStateInValue,
585 cellStateInDimensions, cellStateInValue,
586 activationFunctionDimensions, activationFunctionValue,
587 cellClippingThresholdDimensions, cellClippingThresholdValue,
588 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
589 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
590 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
591 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
592 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
593 scratchBufferDimensions, scratchBufferValue,
594 outputStateOutDimensions, outputStateOutValue,
595 cellStateOutDimensions, cellStateOutValue,
596 outputDimensions, outputValue,
597 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +0100598}
599
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100600template <typename HalPolicy>
Matteo Martincighc7434122018-11-14 12:27:04 +0000601void LstmCifgPeepholeNoProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +0100602{
603 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
604 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
605 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
606
Matteo Martincighc7434122018-11-14 12:27:04 +0000607 uint32_t batchSize = 1;
608 uint32_t inputSize = 2;
609 uint32_t numUnits = 4;
610 uint32_t outputSize = numUnits;
611
telsoa01ce3e84a2018-08-31 09:31:35 +0100612 // Inputs:
613 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
614 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +0000615 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
616 std::vector<float> inputValue{2.0f, 3.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100617
618 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
619 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +0000620 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
621 std::vector<float> inputToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100622 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
623 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000624 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
625 std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
626 0.13056988f, -0.36333650f,
627 -0.22755712f, 0.28253698f,
628 0.24407166f, 0.33826375f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100629 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000630 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
631 std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
632 -0.09624726f, 0.05100781f,
633 0.04717243f, 0.48944736f,
634 -0.38535351f, -0.17212132f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100635 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
636 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000637 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
638 std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
639 -0.55932593f, -0.09426838f,
640 -0.44257352f, 0.54939759f,
641 0.01533556f, 0.42751634f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100642 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
643 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
644 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +0000645 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
646 std::vector<float> recurrentToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100647 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
648 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000649 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
650 std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
651 -0.14340827f, 0.36986142f, 0.23414481f, 0.55899000f,
652 0.10798943f, -0.41174671f, 0.17751795f, -0.34484994f,
653 -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100654 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
655 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000656 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
657 std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
658 0.42957711f, 0.01841056f, -0.32764608f, -0.33027974f,
659 -0.10826075f, 0.20675004f, 0.19069612f, -0.03026325f,
660 -0.54532051f, 0.33003211f, 0.44901288f, 0.21193194f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100661 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
662 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000663 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
664 std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
665 0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
666 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
667 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100668 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000669 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
670 std::vector<float> cellToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100671 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000672 hidl_vec<uint32_t> cellToForgetWeightsDimensions{4};
673 std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100674 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000675 hidl_vec<uint32_t> cellToOutputWeightsDimensions{4};
676 std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100677 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000678 hidl_vec<uint32_t> inputGateBiasDimensions{0}; // VTS was {4} -> {0} ?
679 std::vector<float> inputGateBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100680 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000681 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
682 std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100683 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000684 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
685 std::vector<float> cellBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100686 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000687 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
688 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100689 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
690 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000691 hidl_vec<uint32_t> projectionWeightsDimensions{0};
692 std::vector<float> projectionWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100693 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000694 hidl_vec<uint32_t> projectionBiasDimensions{0};
695 std::vector<float> projectionBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100696
697 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000698 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
699 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100700 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000701 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
702 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100703
Matteo Martincighc7434122018-11-14 12:27:04 +0000704 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +0100705 // 20: The activation function: A value indicating the activation function:
706 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +0000707 hidl_vec<uint32_t> activationFunctionDimensions{};
708 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +0100709 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
710 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000711 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
712 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100713 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
714 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000715 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
716 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100717
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100718 // Normalization:
719 // 23:The input layer normalization weights. A 1-D tensor of shape [num_units].
720 // Used to rescale normalized inputs to activation at input gate.
721 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
722 std::vector<float> inputLayerNormWeightsValue;
723 // 24:The forget layer normalization weights. A 1-D tensor of shape [num_units].
724 // Used to rescale normalized inputs to activation at forget gate.
725 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
726 std::vector<float> forgetLayerNormWeightsValue;
727 // 25:The cell layer normalization weights. A 1-D tensor of shape [num_units].
728 // Used to rescale normalized inputs to activation at cell gate.
729 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
730 std::vector<float> cellLayerNormWeightsValue;
731 // 26:The output layer normalization weights. A 1-D tensor of shape [num_units].
732 // Used to rescale normalized inputs to activation at output gate.
733 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
734 std::vector<float> outputLayerNormWeightsValue;
735
telsoa01ce3e84a2018-08-31 09:31:35 +0100736 // Outputs:
737 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
738 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +0000739 // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
740 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
741 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
742 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
743 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
744 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100745 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000746 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
747 std::vector<float> outputStateOutValue{-0.364445f, -0.00352185f, 0.128866f, -0.0516365f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100748 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000749 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
750 std::vector<float> cellStateOutValue{-0.760444f, -0.0180416f, 0.182264f, -0.0649371f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100751 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
752 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +0000753 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
754 std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100755
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100756 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
757 inputToInputWeightsDimensions, inputToInputWeightsValue,
758 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
759 inputToCellWeightsDimensions, inputToCellWeightsValue,
760 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
761 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
762 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
763 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
764 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
765 cellToInputWeightsDimensions, cellToInputWeightsValue,
766 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
767 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
768 inputGateBiasDimensions, inputGateBiasValue,
769 forgetGateBiasDimensions, forgetGateBiasValue,
770 cellBiasDimensions, cellBiasValue,
771 outputGateBiasDimensions, outputGateBiasValue,
772 projectionWeightsDimensions, projectionWeightsValue,
773 projectionBiasDimensions, projectionBiasValue,
774 outputStateInDimensions, outputStateInValue,
775 cellStateInDimensions, cellStateInValue,
776 activationFunctionDimensions, activationFunctionValue,
777 cellClippingThresholdDimensions, cellClippingThresholdValue,
778 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
779 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
780 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
781 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
782 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
783 scratchBufferDimensions, scratchBufferValue,
784 outputStateOutDimensions, outputStateOutValue,
785 cellStateOutDimensions, cellStateOutValue,
786 outputDimensions, outputValue,
787 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +0100788}
789
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100790template <typename HalPolicy>
Matteo Martincighc7434122018-11-14 12:27:04 +0000791void LstmNoCifgPeepholeProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +0100792{
793 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm3.model.cpp
794 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm3.example.cpp
795 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
796
Matteo Martincighc7434122018-11-14 12:27:04 +0000797 uint32_t batchSize = 2;
798 uint32_t inputSize = 5;
799 uint32_t numUnits = 20;
800 uint32_t outputSize = 16;
801
telsoa01ce3e84a2018-08-31 09:31:35 +0100802 // Inputs:
803 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
804 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +0000805 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
806 std::vector<float> inputValue{0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
807 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100808
809 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
810 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +0000811 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
812 std::vector<float> inputToInputWeightsValue
813 {
814 0.0213936830f, 0.0612455100f, 0.0469051670f, -0.0146576770f, -0.0314946300f,
815 0.0917180300f, 0.1464780100f, 0.1079719300f, -0.0057968358f, 0.0019193048f,
816 -0.2726754000f, 0.1015402900f, -0.0185398850f, 0.0803498850f, -0.1026238500f,
817 -0.0225997870f, -0.0912115500f, -0.0086759670f, -0.0452061030f, -0.0821282000f,
818 -0.0080459520f, 0.0154780810f, 0.0552172470f, 0.0387195870f, 0.0441536270f,
819 -0.0645324300f, 0.0503182500f, -0.0469351080f, -0.0081644309f, 0.0145742260f,
820 -0.1671009000f, -0.1551955200f, -0.1681979700f, -0.1397126900f, -0.1195305900f,
821 0.2500548700f, -0.2279098300f, 0.0098550870f, -0.0281409580f, -0.1120069800f,
822 0.1129540800f, -0.0035217577f, 0.0544850750f, 0.0518469500f, 0.0647112060f,
823 0.1098919300f, 0.1167478600f, 0.0349060700f, 0.0772735700f, 0.1139058500f,
824 -0.1863375000f, -0.1034451000f, -0.1394518900f, -0.0494012270f, -0.1876706300f,
825 0.0424839030f, 0.1423355200f, 0.1383258100f, 0.1835016500f, 0.1454560300f,
826 -0.0285457040f, 0.0249395310f, 0.0509297180f, 0.0076203286f, -0.0029723682f,
827 -0.0424842240f, -0.1182759600f, -0.0917110400f, -0.1080862800f, -0.1632798800f,
828 -0.2273378000f, -0.0993647000f, -0.0171551070f, 0.0023917493f, 0.0492727640f,
829 0.0038534778f, 0.0547645050f, 0.0897537840f, 0.0694723400f, 0.0801447600f,
830 -0.0454423400f, -0.0497073000f, -0.0713563100f, -0.0489291060f, -0.0040420120f,
831 -0.0092840260f, 0.0180420540f, 0.0036860977f, -0.0742730200f, -0.1143460400f,
832 -0.0189954560f, 0.0314875430f, 0.0128349080f, 0.0199777540f, 0.0442566540f,
833 -0.3929261300f, -0.1851933400f, -0.1165128100f, -0.0680989200f, 0.0113736770f
834 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100835 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
836 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000837 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
838 std::vector<float> inputToForgetWeightsValue
839 {
840 -0.0018401089f, -0.0048522370f, 0.0369842400f, 0.0141817040f, 0.0282732360f,
841 -0.0167261940f, -0.0524975900f, -0.1020426100f, 0.0086106600f, -0.0409795050f,
842 -0.0098991870f, 0.0192389200f, -0.0281772690f, -0.0853510300f, -0.1458549500f,
843 0.1066256700f, -0.0190973100f, -0.0178835340f, -0.0047269356f, -0.0451033230f,
844 0.0030784295f, 0.0767847750f, 0.0746369600f, 0.0945313950f, 0.0814421000f,
845 -0.1225789900f, -0.0339457580f, -0.0313034650f, 0.0456306260f, 0.0684388700f,
846 -0.1349294500f, -0.0124800070f, -0.0811829000f, -0.0722449900f, -0.0962879100f,
847 0.0451009460f, 0.0012300825f, 0.0139646620f, 0.0993723940f, 0.0254305900f,
848 0.0695832400f, 0.0342572960f, 0.0482646000f, 0.0626799700f, 0.0526250680f,
849 0.1278466600f, 0.0707789700f, 0.0257259350f, 0.0416500900f, 0.0724190500f,
850 0.0186686440f, -0.0373772940f, -0.0627778300f, -0.0883363600f, -0.0401206050f,
851 -0.0114055860f, -0.0078083350f, -0.0103013860f, -0.0051021670f, 0.0277174640f,
852 0.0548342300f, 0.1144911100f, 0.1128965200f, 0.1093983900f, 0.1339650600f,
853 -0.0840216600f, -0.0190146200f, -0.0446783040f, -0.0772056500f, 0.0143500630f,
854 -0.1175795800f, -0.0652038000f, -0.0818573300f, -0.0767543240f, -0.0926143750f,
855 0.1040549100f, 0.0529603360f, 0.0357558950f, 0.0358393860f, -0.0125405530f,
856 0.0368812980f, 0.0291337600f, 0.0342015900f, 0.0544844700f, -0.0545233530f,
857 0.0258271500f, 0.0232735500f, -0.0118571790f, -0.0011980024f, -0.0346417170f,
858 -0.0261250940f, -0.1758261500f, -0.1592365700f, -0.2748677400f, -0.0006143371f,
859 0.0001771948f, -8.470171e-05f, 0.0265180700f, 0.0457907650f, 0.069564960f
860 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100861 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000862 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
863 std::vector<float> inputToCellWeightsValue
864 {
865 -0.0458028300f, -0.0954946200f, -0.0324189850f, -0.0645463300f, -0.0435284530f,
866 0.0430185870f, -0.0491523440f, -0.1241814400f, -0.0789854750f, -0.0759688900f,
867 0.0194843620f, -0.1143496200f, -0.0074034138f, -0.0631484400f, -0.0929814950f,
868 0.0062155537f, -0.0250343380f, -0.0028890965f, 0.0489295270f, 0.0623507500f,
869 0.1066591800f, -0.0320367920f, -0.0850591600f, -0.1084335800f, -0.1300243300f,
870 -0.0368164370f, -0.0213013400f, -0.0165182390f, 0.0047691227f, -0.0025825808f,
871 0.0660178660f, 0.0299915340f, -0.1065283600f, -0.1037554000f, -0.1305607100f,
872 -0.0326664300f, -0.0337024140f, -0.0064734240f, -0.0461169200f, 0.0144193390f,
873 -0.0251743230f, 0.0396852000f, 0.0817775060f, 0.0615746800f, 0.1021009500f,
874 -0.0096581940f, 0.0465117170f, 0.0360390600f, 0.0069369148f, 0.0159600950f,
875 -0.0650766600f, 0.0955159800f, 0.0535688360f, 0.0640871400f, 0.1283566700f,
876 -0.0087143290f, -0.2021196600f, -0.1209367400f, 0.0294504720f, 0.2849013000f,
877 -0.0292279010f, 0.1164364000f, -0.0856026300f, 0.0994178600f, -0.0369995650f,
878 -0.0288426260f, -0.0033637602f, -0.0170129020f, -0.0972086500f, -0.1119335100f,
879 -0.0291551170f, -0.0179360340f, -0.0097689360f, -0.0422332400f, -0.0361596350f,
880 0.0650511200f, -0.0217428920f, -0.0233772120f, -0.0722136400f, -0.0643055200f,
881 0.0545386500f, 0.0911498140f, 0.0638733100f, 0.0075183930f, 0.0559609530f,
882 0.0697793440f, 0.0464111680f, 0.1050991100f, 0.0746389400f, 0.0075130584f,
883 0.0128509820f, 0.0455543100f, 0.0569556880f, 0.0655528500f, 0.0508014560f,
884 -0.0098626830f, 0.0082677200f, -0.0265556090f, -0.0073611983f, -0.0014897042f
885 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100886 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
887 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000888 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
889 std::vector<float> inputToOutputWeightsValue
890 {
891 -0.0998932000f, -0.0720195600f, -0.0528037730f, -0.1562959300f, -0.1500191800f,
892 -0.0765075100f, 0.0235985500f, -0.0751553550f, -0.0803770900f, -0.1509353400f,
893 0.0295175520f, -0.0475139300f, 0.0103505310f, -0.0266485100f, -0.0168397220f,
894 -0.0231211630f, 0.0077019283f, 0.0128512570f, -0.0504064900f, -0.0129761000f,
895 -0.0217377470f, -0.0383057930f, -0.0687058600f, -0.0148124700f, -0.0012853940f,
896 0.1012423600f, 0.0831228350f, 0.0533130060f, -0.0622356460f, -0.0756371540f,
897 -0.0278339030f, 0.0297749710f, 0.1130802000f, 0.0921890600f, 0.0950613500f,
898 -0.0866657640f, -0.0371627060f, -0.0388809140f, -0.0358328450f, -0.0144815640f,
899 -0.0982500300f, -0.1204856900f, -0.0976655860f, -0.0528763300f, -0.0964047000f,
900 -0.1136642900f, 0.0357775050f, 0.1356881900f, 0.0524513830f, 0.0506493040f,
901 0.0579895100f, -0.0218523350f, -0.0998488440f, 0.0147404750f, -0.0788979460f,
902 0.0497469900f, 0.0141604730f, 0.0697393200f, 0.0496494200f, 0.0333646460f,
903 0.0819012400f, 0.0255353670f, 0.0508931650f, 0.0485142540f, 0.0694581300f,
904 -0.0789075640f, -0.0670761600f, -0.1184450800f, -0.0998668800f, -0.0750940300f,
905 0.0626322600f, 0.1492558700f, 0.2018843600f, 0.1209845100f, 0.1463941500f,
906 0.0015017595f, -0.0142673820f, -0.0341725700f, 0.0127114680f, 0.0028300495f,
907 -0.0247584820f, -0.0509854800f, -0.0821182000f, 0.0142256720f, 0.0215441580f,
908 0.0894972500f, 0.0750526800f, -0.0020780868f, 0.0490825800f, 0.0647629500f,
909 -0.0229070630f, 0.0275624560f, 0.0401857350f, 0.0195675770f, -0.0155987390f,
910 -0.0490973030f, -0.0171218660f, -0.0833682340f, -0.0233200200f, -0.084095600f
911 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100912 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
913 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
914 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +0000915 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
916 std::vector<float> recurrentToInputWeightsValue
917 {
telsoa01ce3e84a2018-08-31 09:31:35 +0100918 -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f, // 00
919 -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
920 -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
921 -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000922 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f, // 01
923 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100924 -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000925 0.14283475f, -0.07390571f, -0.06402044f, 0.062524505f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100926 -0.093129106f, 0.04860203f, -0.08364217f, -0.08119002f, // 02
Matteo Martincighc7434122018-11-14 12:27:04 +0000927 0.009352075f, 0.22920375f, 0.0016303885f, 0.11583097f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100928 -0.13732095f, 0.012405723f, -0.07551853f, 0.06343048f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000929 0.12162708f, -0.031923793f, -0.014335606f, 0.01790974f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100930 -0.10650317f, -0.0724401f, 0.08554849f, -0.05727212f, // 03
Matteo Martincighc7434122018-11-14 12:27:04 +0000931 0.06556731f, -0.042729504f, -0.043227166f, 0.011683251f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100932 -0.013082158f, -0.029302018f, -0.010899579f, -0.062036745f,
933 -0.022509435f, -0.00964907f, -0.01567329f, 0.04260106f,
934 -0.07787477f, -0.11576462f, 0.017356863f, 0.048673786f, // 04
935 -0.017577527f, -0.05527947f, -0.082487635f, -0.040137455f,
936 -0.10820036f, -0.04666372f, 0.022746278f, -0.07851417f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000937 0.01068115f, 0.032956902f, 0.022433773f, 0.0026891115f,
938 0.08944216f, -0.0685835f, 0.010513544f, 0.07228705f, // 05
939 0.02032331f, -0.059686817f, -0.0005566496f, -0.086984694f,
940 0.040414046f, -0.1380399f, 0.094208956f, -0.05722982f,
941 0.012092817f, -0.04989123f, -0.086576f, -0.003399834f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100942 -0.04696032f, -0.045747425f, 0.10091314f, 0.048676282f, // 06
943 -0.029037097f, 0.031399418f, -0.0040285117f, 0.047237843f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000944 0.09504992f, 0.041799378f, -0.049185462f, -0.031518843f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100945 -0.10516937f, 0.026374253f, 0.10058866f, -0.0033195973f,
946 -0.041975245f, 0.0073591834f, 0.0033782164f, -0.004325073f, // 07
947 -0.10167381f, 0.042500053f, -0.01447153f, 0.06464186f,
948 -0.017142897f, 0.03312627f, 0.009205989f, 0.024138335f,
949 -0.011337001f, 0.035530265f, -0.010912711f, 0.0706555f,
950 -0.005894094f, 0.051841937f, -0.1401738f, -0.02351249f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +0000951 0.0365468f, 0.07590991f, 0.08838724f, 0.021681072f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100952 -0.10086113f, 0.019608743f, -0.06195883f, 0.077335775f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000953 0.023646897f, -0.095322326f, 0.02233014f, 0.09756986f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100954 -0.048691444f, -0.009579111f, 0.07595467f, 0.11480546f, // 09
955 -0.09801813f, 0.019894179f, 0.08502348f, 0.004032281f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000956 0.037211012f, 0.068537936f, -0.048005626f, -0.091520436f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100957 -0.028379958f, -0.01556313f, 0.06554592f, -0.045599163f,
958 -0.01672207f, -0.020169014f, -0.011877351f, -0.20212261f, // 10
Matteo Martincighc7434122018-11-14 12:27:04 +0000959 0.010889619f, 0.0047078193f, 0.038385306f, 0.08540671f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100960 -0.017140968f, -0.0035865551f, 0.016678626f, 0.005633034f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000961 0.015963363f, 0.00871737f, 0.060130805f, 0.028611384f,
962 0.10109069f, -0.015060172f, -0.07894427f, 0.06401885f, // 11
963 0.011584063f, -0.024466386f, 0.0047652307f, -0.09041358f,
964 0.030737216f, -0.0046374933f, 0.14215417f, -0.11823516f,
965 0.019899689f, 0.006106124f, -0.027092824f, 0.0786356f,
966 0.05052217f, -0.058925f, -0.011402121f, -0.024987547f, // 12
telsoa01ce3e84a2018-08-31 09:31:35 +0100967 -0.0013661642f, -0.06832946f, -0.015667673f, -0.1083353f,
968 -0.00096863037f, -0.06988685f, -0.053350925f, -0.027275559f,
969 -0.033664223f, -0.07978348f, -0.025200296f, -0.017207067f,
970 -0.058403496f, -0.055697463f, 0.005798788f, 0.12965427f, // 13
971 -0.062582195f, 0.0013350133f, -0.10482091f, 0.0379771f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000972 0.072521195f, -0.0029455067f, -0.13797039f, -0.03628521f,
973 0.013806405f, -0.017858358f, -0.01008298f, -0.07700066f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100974 -0.017081132f, 0.019358726f, 0.0027079724f, 0.004635139f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +0000975 0.062634714f, -0.02338735f, -0.039547626f, -0.02050681f,
976 0.03385117f, -0.083611414f, 0.002862572f, -0.09421313f,
977 0.058618143f, -0.08598433f, 0.00972939f, 0.023867095f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100978 -0.053934585f, -0.023203006f, 0.07452513f, -0.048767887f, // 15
979 -0.07314807f, -0.056307215f, -0.10433547f, -0.06440842f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000980 0.04328182f, 0.04389765f, -0.020006588f, -0.09076438f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100981 -0.11652589f, -0.021705797f, 0.03345259f, -0.010329105f,
982 -0.025767034f, 0.013057034f, -0.07316461f, -0.10145612f, // 16
Matteo Martincighc7434122018-11-14 12:27:04 +0000983 0.06358255f, 0.18531723f, 0.07759293f, 0.12006465f,
984 0.1305557f, 0.058638252f, -0.03393652f, 0.09622831f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100985 -0.16253184f, -2.4580743e-06f, 0.079869635f, -0.070196845f,
986 -0.005644518f, 0.06857898f, -0.12598175f, -0.035084512f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +0000987 0.03156317f, -0.12794146f, -0.031963028f, 0.04692781f,
988 0.030070418f, 0.0071660685f, -0.095516115f, -0.004643372f,
989 0.040170413f, -0.062104587f, -0.0037324072f, 0.0554317f,
990 0.08184801f, -0.019164372f, 0.06791302f, 0.034257166f, // 18
telsoa01ce3e84a2018-08-31 09:31:35 +0100991 -0.10307039f, 0.021943003f, 0.046745934f, 0.0790918f,
992 -0.0265588f, -0.007824208f, 0.042546265f, -0.00977924f,
993 -0.0002440307f, -0.017384544f, -0.017990116f, 0.12252321f,
994 -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f, // 19
Matteo Martincighc7434122018-11-14 12:27:04 +0000995 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f,
telsoa01ce3e84a2018-08-31 09:31:35 +0100996 -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f,
Matteo Martincighc7434122018-11-14 12:27:04 +0000997 -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f
998 };
telsoa01ce3e84a2018-08-31 09:31:35 +0100999 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1000 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001001 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
1002 std::vector<float> recurrentToForgetWeightsValue
1003 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001004 -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f, // 00
Matteo Martincighc7434122018-11-14 12:27:04 +00001005 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001006 -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001007 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
1008 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f, // 01
telsoa01ce3e84a2018-08-31 09:31:35 +01001009 -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
1010 -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001011 0.061878487f, -0.04729229f, 0.034919553f, -0.07585433f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001012 -0.04421272f, -0.044019096f, 0.085488975f, 0.04058006f, // 02
1013 -0.06890133f, -0.030951202f, -0.024628663f, -0.07672815f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001014 0.034293607f, 0.08556707f, -0.05293577f, -0.033561368f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001015 -0.04899627f, 0.0241671f, 0.015736353f, -0.095442444f,
1016 -0.029564252f, 0.016493602f, -0.035026584f, 0.022337519f, // 03
1017 -0.026871363f, 0.004780428f, 0.0077918363f, -0.03601621f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001018 0.016435321f, -0.03263031f, -0.09543275f, -0.047392778f,
1019 0.013454138f, 0.028934088f, 0.01685226f, -0.086110644f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001020 -0.046250615f, -0.01847454f, 0.047608484f, 0.07339695f, // 04
Matteo Martincighc7434122018-11-14 12:27:04 +00001021 0.034546845f, -0.04881143f, 0.009128804f, -0.08802852f,
1022 0.03761666f, 0.008096139f, -0.014454086f, 0.014361001f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001023 -0.023502491f, -0.0011840804f, -0.07607001f, 0.001856849f,
1024 -0.06509276f, -0.006021153f, -0.08570962f, -0.1451793f, // 05
Matteo Martincighc7434122018-11-14 12:27:04 +00001025 0.060212336f, 0.055259194f, 0.06974018f, 0.049454916f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001026 -0.027794661f, -0.08077226f, -0.016179763f, 0.1169753f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001027 0.17213494f, -0.0056326236f, -0.053934924f, -0.0124349f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001028 -0.11520337f, 0.05409887f, 0.088759385f, 0.0019655675f, // 06
Matteo Martincighc7434122018-11-14 12:27:04 +00001029 0.0042065294f, 0.03881498f, 0.019844765f, 0.041858196f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001030 -0.05695512f, 0.047233116f, 0.038937137f, -0.06542224f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001031 0.014429736f, -0.09719407f, 0.13908425f, -0.05379757f,
1032 0.012321099f, 0.082840554f, -0.029899208f, 0.044217527f, // 07
1033 0.059855383f, 0.07711018f, -0.045319796f, 0.0948846f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001034 -0.011724666f, -0.0033288454f, -0.033542685f, -0.04764985f,
1035 -0.13873616f, 0.040668588f, 0.034832682f, -0.015319203f,
1036 -0.018715994f, 0.046002675f, 0.0599172f, -0.043107376f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +00001037 0.0294216f, -0.002314414f, -0.022424703f, 0.0030315618f,
1038 0.0014641669f, 0.0029166266f, -0.11878115f, 0.013738511f,
1039 0.12375372f, -0.0006038222f, 0.029104086f, 0.087442465f,
1040 0.052958444f, 0.07558703f, 0.04817258f, 0.044462286f, // 09
telsoa01ce3e84a2018-08-31 09:31:35 +01001041 -0.015213451f, -0.08783778f, -0.0561384f, -0.003008196f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001042 0.047060397f, -0.002058388f, 0.03429439f, -0.018839769f,
1043 0.024734668f, 0.024614193f, -0.042046934f, 0.09597743f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001044 -0.0043254104f, 0.04320769f, 0.0064070094f, -0.0019131786f, // 10
1045 -0.02558259f, -0.022822596f, -0.023273505f, -0.02464396f,
1046 -0.10991725f, -0.006240552f, 0.0074488563f, 0.024044557f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001047 0.04383914f, -0.046476185f, 0.028658995f, 0.060410924f,
1048 0.050786525f, 0.009452605f, -0.0073054377f, -0.024810238f, // 11
1049 0.0052906186f, 0.0066939713f, -0.0020913032f, 0.014515517f,
1050 0.015898481f, 0.021362653f, -0.030262267f, 0.016587038f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001051 -0.011442813f, 0.041154444f, -0.007631438f, -0.03423484f,
1052 -0.010977775f, 0.036152758f, 0.0066366293f, 0.11915515f, // 12
Matteo Martincighc7434122018-11-14 12:27:04 +00001053 0.02318443f, -0.041350313f, 0.021485701f, -0.10906167f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001054 -0.028218046f, -0.00954771f, 0.020531068f, -0.11995105f,
1055 -0.03672871f, 0.024019798f, 0.014255957f, -0.05221243f,
1056 -0.00661567f, -0.04630967f, 0.033188973f, 0.10107534f, // 13
1057 -0.014027541f, 0.030796422f, -0.10270911f, -0.035999842f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001058 0.15443139f, 0.07684145f, 0.036571592f, -0.035900835f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001059 -0.0034699554f, 0.06209149f, 0.015920248f, -0.031122351f,
1060 -0.03858649f, 0.01849943f, 0.13872518f, 0.01503974f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +00001061 0.069941424f, -0.06948533f, -0.0088794185f, 0.061282158f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001062 -0.047401894f, 0.03100163f, -0.041533746f, -0.10430945f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001063 0.044574402f, -0.01425562f, -0.024290353f, 0.034563623f,
1064 0.05866852f, 0.023947537f, -0.09445152f, 0.035450947f, // 15
1065 0.02247216f, -0.0042998926f, 0.061146557f, -0.10250651f,
1066 0.020881841f, -0.06747029f, 0.10062043f, -0.0023941975f,
1067 0.03532124f, -0.016341697f, 0.09685456f, -0.016764693f,
1068 0.051808182f, 0.05875331f, -0.04536488f, 0.001626336f, // 16
telsoa01ce3e84a2018-08-31 09:31:35 +01001069 -0.028892258f, -0.01048663f, -0.009793449f, -0.017093895f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001070 0.010987891f, 0.02357273f, -0.00010856845f, 0.0099760275f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001071 -0.001845119f, -0.03551521f, 0.0018358806f, 0.05763657f,
1072 -0.01769146f, 0.040995963f, 0.02235177f, -0.060430344f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +00001073 0.11475477f, -0.023854522f, 0.10071741f, 0.0686208f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001074 -0.014250481f, 0.034261297f, 0.047418304f, 0.08562733f,
1075 -0.030519066f, 0.0060542435f, 0.014653856f, -0.038836084f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001076 0.04096551f, 0.032249358f, -0.08355519f, -0.026823482f, // 18
1077 0.056386515f, -0.010401743f, -0.028396193f, 0.08507674f,
1078 0.014410365f, 0.020995233f, 0.17040324f, 0.11511526f,
1079 0.02459721f, 0.0066619175f, 0.025853224f, -0.023133837f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001080 -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f, // 19
1081 -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001082 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f,
1083 -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f
1084 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001085 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1086 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001087 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
1088 std::vector<float> recurrentToCellWeightsValue
1089 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001090 -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001091 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
1092 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001093 -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001094 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
1095 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001096 -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
1097 -0.019443132f, -0.030755889f, -0.0040000007f, 0.04465846f,
1098 -0.021585021f, 0.0031670958f, 0.0053199246f, -0.056117613f,
1099 -0.10893326f, 0.076739706f, -0.08509834f, -0.027997585f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001100 0.037871376f, 0.01449768f, -0.09002357f, -0.06111149f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001101 -0.046195522f, 0.0422062f, -0.005683705f, -0.1253618f,
1102 -0.012925729f, -0.04890792f, 0.06985068f, 0.037654128f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001103 0.03398274f, -0.004781977f, 0.007032333f, -0.031787455f,
1104 0.010868644f, -0.031489216f, 0.09525667f, 0.013939797f,
1105 0.0058680447f, 0.0167067f, 0.02668468f, -0.04797466f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001106 -0.048885044f, -0.12722108f, 0.035304096f, 0.06554885f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001107 0.00972396f, -0.039238118f, -0.05159735f, -0.11329045f,
1108 0.1613692f, -0.03750952f, 0.06529313f, -0.071974665f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001109 -0.11769596f, 0.015524369f, -0.0013754242f, -0.12446318f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001110 0.02786344f, -0.014179351f, 0.005264273f, 0.14376344f,
1111 0.015983658f, 0.03406988f, -0.06939408f, 0.040699873f,
1112 0.02111075f, 0.09669095f, 0.041345075f, -0.08316494f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001113 -0.07684199f, -0.045768797f, 0.032298047f, -0.041805092f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001114 0.0119405f, 0.0061010392f, 0.12652606f, 0.0064572375f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001115 -0.024950314f, 0.11574242f, 0.04508852f, -0.04335324f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001116 0.06760663f, -0.027437469f, 0.07216407f, 0.06977076f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001117 -0.05438599f, 0.034033038f, -0.028602652f, 0.05346137f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001118 0.043184172f, -0.037189785f, 0.10420091f, 0.00882477f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001119 -0.054019816f, -0.074273005f, -0.030617684f, -0.0028467078f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001120 0.024302477f, -0.0038869337f, 0.005332455f, 0.0013399826f,
1121 0.04361412f, -0.007001822f, 0.09631092f, -0.06702025f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001122 -0.042049985f, -0.035070654f, -0.04103342f, -0.10273396f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001123 0.0544271f, 0.037184782f, -0.13150354f, -0.0058036847f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001124 -0.008264958f, 0.042035464f, 0.05891794f, 0.029673764f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001125 0.0063542654f, 0.044788733f, 0.054816857f, 0.062257513f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001126 -0.00093483756f, 0.048938446f, -0.004952862f, -0.007730018f,
1127 -0.04043371f, -0.017094059f, 0.07229206f, -0.023670016f,
1128 -0.052195564f, -0.025616996f, -0.01520939f, 0.045104615f,
1129 -0.007376126f, 0.003533447f, 0.006570588f, 0.056037236f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001130 0.12436656f, 0.051817212f, 0.028532185f, -0.08686856f,
1131 0.11868599f, 0.07663395f, -0.07323171f, 0.03463402f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001132 -0.050708205f, -0.04458982f, -0.11590894f, 0.021273347f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001133 0.1251325f, -0.15313013f, -0.12224372f, 0.17228661f,
1134 0.023029093f, 0.086124025f, 0.006445803f, -0.03496501f,
1135 0.028332196f, 0.04449512f, -0.042436164f, -0.026587414f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001136 -0.006041347f, -0.09292539f, -0.05678812f, 0.03897832f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001137 0.09465633f, 0.008115513f, -0.02171956f, 0.08304309f,
1138 0.071401566f, 0.019622514f, 0.032163795f, -0.004167056f,
1139 0.02295182f, 0.030739572f, 0.056506045f, 0.004612461f,
1140 0.06524936f, 0.059999723f, 0.046395954f, -0.0045512207f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001141 -0.1335546f, -0.030136576f, 0.11584653f, -0.014678886f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001142 0.0020118146f, -0.09688814f, -0.0790206f, 0.039770417f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001143 -0.0329582f, 0.07922767f, 0.029322514f, 0.026405897f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001144 0.04207835f, -0.07073373f, 0.063781224f, 0.0859677f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001145 -0.10925287f, -0.07011058f, 0.048005477f, 0.03438226f,
1146 -0.09606514f, -0.006669445f, -0.043381985f, 0.04240257f,
1147 -0.06955775f, -0.06769346f, 0.043903265f, -0.026784198f,
1148 -0.017840602f, 0.024307009f, -0.040079936f, -0.019946516f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001149 0.045318738f, -0.12233574f, 0.026170589f, 0.0074471775f,
1150 0.15978073f, 0.10185836f, 0.10298046f, -0.015476589f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001151 -0.039390966f, -0.072174534f, 0.0739445f, -0.1211869f,
1152 -0.0347889f, -0.07943156f, 0.014809798f, -0.12412325f,
1153 -0.0030663363f, 0.039695457f, 0.0647603f, -0.08291318f,
1154 -0.018529687f, -0.004423833f, 0.0037507233f, 0.084633216f,
1155 -0.01514876f, -0.056505352f, -0.012800942f, -0.06994386f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001156 0.012962922f, -0.031234352f, 0.07029052f, 0.016418684f,
1157 0.03618972f, 0.055686004f, -0.08663945f, -0.017404709f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001158 -0.054761406f, 0.029065743f, 0.052404847f, 0.020238016f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001159 0.0048197987f, -0.0214882f, 0.07078733f, 0.013016777f,
1160 0.06262858f, 0.009184685f, 0.020785125f, -0.043904778f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001161 -0.0270329f, -0.03299152f, -0.060088247f, -0.015162964f,
1162 -0.001828936f, 0.12642565f, -0.056757294f, 0.013586685f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001163 0.09232601f, -0.035886683f, 0.06000002f, 0.05229691f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001164 -0.052580316f, -0.082029596f, -0.010794592f, 0.012947712f,
1165 -0.036429964f, -0.085508935f, -0.13127148f, -0.017744139f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001166 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001167 -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f,
1168 -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001169 -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f
1170 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001171 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1172 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001173 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1174 std::vector<float> recurrentToOutputWeightsValue
1175 {
1176 0.025825322f, -0.05813119f, 0.09495884f, -0.045984812f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001177 -0.01255415f, -0.0026479573f, -0.08196161f, -0.054914974f,
1178 -0.0046604523f, -0.029587349f, -0.044576716f, -0.07480124f,
1179 -0.082868785f, 0.023254942f, 0.027502948f, -0.0039728214f,
1180 -0.08683098f, -0.08116779f, -0.014675607f, -0.037924774f,
1181 -0.023314456f, -0.007401714f, -0.09255757f, 0.029460307f,
1182 -0.08829125f, -0.005139627f, -0.08989442f, -0.0555066f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001183 0.13596267f, -0.025062224f, -0.048351806f, -0.03850004f,
1184 0.07266485f, -0.022414139f, 0.05940088f, 0.075114764f,
1185 0.09597592f, -0.010211725f, -0.0049794707f, -0.011523867f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001186 -0.025980417f, 0.072999895f, 0.11091378f, -0.081685916f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001187 0.014416728f, 0.043229222f, 0.034178585f, -0.07530371f,
1188 0.035837382f, -0.085607f, -0.007721233f, -0.03287832f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001189 -0.043848954f, -0.06404588f, -0.06632928f, -0.073643476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001190 0.008214239f, -0.045984086f, 0.039764922f, 0.03474462f,
1191 0.060612556f, -0.080590084f, 0.049127717f, 0.04151091f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001192 -0.030063879f, 0.008801774f, -0.023021035f, -0.019558564f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001193 0.05158114f, -0.010947698f, -0.011825728f, 0.0075720972f,
1194 0.0699727f, -0.0039981045f, 0.069350146f, 0.08799282f,
1195 0.016156472f, 0.035502106f, 0.11695009f, 0.006217345f,
1196 0.13392477f, -0.037875112f, 0.025745004f, 0.08940699f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001197 -0.00924166f, 0.0046702605f, -0.036598757f, -0.08811812f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001198 0.10522024f, -0.032441203f, 0.008176899f, -0.04454919f,
1199 0.07058152f, 0.0067963637f, 0.039206743f, 0.03259838f,
1200 0.03725492f, -0.09515802f, 0.013326398f, -0.052055415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001201 -0.025676316f, 0.03198509f, -0.015951829f, -0.058556724f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001202 0.036879618f, 0.043357447f, 0.028362012f, -0.05908629f,
1203 0.0059240665f, -0.04995891f, -0.019187413f, 0.0276265f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001204 -0.01628143f, 0.0025863599f, 0.08800015f, 0.035250366f,
1205 -0.022165963f, -0.07328642f, -0.009415526f, -0.07455109f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001206 0.11690406f, 0.0363299f, 0.07411125f, 0.042103454f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001207 -0.009660886f, 0.019076364f, 0.018299393f, -0.046004917f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001208 0.08891175f, 0.0431396f, -0.026327137f, -0.051502608f,
1209 0.08979574f, -0.051670972f, 0.04940282f, -0.07491107f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001210 -0.021240504f, 0.022596184f, -0.034280192f, 0.060163025f,
1211 -0.058211457f, -0.051837247f, -0.01349775f, -0.04639988f,
1212 -0.035936575f, -0.011681591f, 0.064818054f, 0.0073146066f,
1213 -0.021745546f, -0.043124277f, -0.06471268f, -0.07053354f,
1214 -0.029321948f, -0.05330136f, 0.016933719f, -0.053782392f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001215 0.13747959f, -0.1361751f, -0.11569455f, 0.0033329215f,
1216 0.05693899f, -0.053219706f, 0.063698f, 0.07977434f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001217 -0.07924483f, 0.06936997f, 0.0034815092f, -0.007305279f,
1218 -0.037325785f, -0.07251102f, -0.033633437f, -0.08677009f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001219 0.091591336f, -0.14165086f, 0.021752775f, 0.019683983f,
1220 0.0011612234f, -0.058154266f, 0.049996935f, 0.0288841f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001221 -0.0024567875f, -0.14345716f, 0.010955264f, -0.10234828f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001222 0.1183656f, -0.0010731248f, -0.023590032f, -0.072285876f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001223 -0.0724771f, -0.026382286f, -0.0014920527f, 0.042667855f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001224 0.0018776858f, 0.02986552f, 0.009814309f, 0.0733756f,
1225 0.12289186f, 0.018043943f, -0.0458958f, 0.049412545f,
1226 0.033632483f, 0.05495232f, 0.036686596f, -0.013781798f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001227 -0.010036754f, 0.02576849f, -0.08307328f, 0.010112348f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001228 0.042521734f, -0.05869831f, -0.071689695f, 0.03876447f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001229 -0.13275425f, -0.0352966f, -0.023077697f, 0.10285965f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001230 0.084736146f, 0.15568255f, -0.00040734606f, 0.027835453f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001231 -0.10292561f, -0.032401145f, 0.10053256f, -0.026142767f,
1232 -0.08271222f, -0.0030240538f, -0.016368777f, 0.1070414f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001233 0.042672627f, 0.013456989f, -0.0437609f, -0.022309763f,
1234 0.11576483f, 0.04108048f, 0.061026827f, -0.0190714f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001235 -0.0869359f, 0.037901703f, 0.0610107f, 0.07202949f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001236 0.01675338f, 0.086139716f, -0.08795751f, -0.014898893f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001237 -0.023771819f, -0.01965048f, 0.007955471f, -0.043740474f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001238 0.03346837f, -0.10549954f, 0.090567775f, 0.042013682f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001239 -0.03176985f, 0.12569028f, -0.02421228f, -0.029526481f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001240 0.023851605f, 0.031539805f, 0.05292009f, -0.02344001f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001241 -0.07811758f, -0.08834428f, 0.10094801f, 0.16594367f,
1242 -0.06861939f, -0.021256343f, -0.041093912f, -0.06669611f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001243 0.035498552f, 0.021757556f, -0.09302526f, -0.015403468f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001244 -0.06614931f, -0.051798206f, -0.013874718f, 0.03630673f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001245 0.010412845f, -0.08077351f, 0.046185967f, 0.0035662893f,
1246 0.03541868f, -0.094149634f, -0.034814864f, 0.003128424f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001247 -0.020674974f, -0.03944324f, -0.008110165f, -0.11113267f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001248 0.08484226f, 0.043586485f, 0.040582247f, 0.0968012f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001249 -0.065249965f, -0.028036479f, 0.0050708856f, 0.0017462453f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001250 0.0326779f, 0.041296225f, 0.09164146f, -0.047743853f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001251 -0.015952192f, -0.034451712f, 0.084197424f, -0.05347844f,
1252 -0.11768019f, 0.085926116f, -0.08251791f, -0.045081906f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001253 0.0948852f, 0.068401024f, 0.024856757f, 0.06978981f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001254 -0.057309967f, -0.012775832f, -0.0032452994f, 0.01977615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001255 -0.041040014f, -0.024264973f, 0.063464895f, 0.05431621f
1256 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001257 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001258 hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
1259 std::vector<float> cellToInputWeightsValue
1260 {
1261 0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
1262 -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
1263 -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f, -0.052169047f,
1264 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f
1265 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001266 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001267 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1268 std::vector<float> cellToForgetWeightsValue
1269 {
1270 -0.01998659f, -0.15568835f, -0.24248174f, -0.012770197f, 0.041331276f,
1271 -0.072311886f, -0.052123554f, -0.0066330447f, -0.043891653f, 0.036225766f,
1272 -0.047248036f, 0.021479502f, 0.033189066f, 0.11952997f, -0.020432774f,
1273 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f
1274 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001275 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001276 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1277 std::vector<float> cellToOutputWeightsValue
1278 {
1279 0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
1280 -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
1281 -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
1282 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f
1283 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001284 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001285 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
1286 std::vector<float> inputGateBiasValue
1287 {
1288 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
1289 -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
1290 -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
1291 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f
1292 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001293 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001294 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1295 std::vector<float> forgetGateBiasValue
1296 {
1297 0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f, 0.11098921f,
1298 0.15378423f, 0.09263801f, 0.09790885f, 0.09508917f, 0.061199076f,
1299 0.07665568f, -0.015443159f, -0.03499149f, 0.046190713f, 0.08895977f,
1300 0.10899629f, 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f
1301 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001302 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001303 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1304 std::vector<float> cellBiasValue
1305 {
1306 -0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f, -0.1483596f,
1307 -0.10639995f, -0.091433935f, 0.058573797f, -0.06809782f, -0.07889636f,
1308 -0.043246906f, -0.09829136f, -0.4279842f, 0.034901652f, 0.18797937f,
1309 0.0075234566f, 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f
1310 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001311 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001312 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1313 std::vector<float> outputGateBiasValue
1314 {
1315 0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
1316 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
1317 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
1318 -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f
1319 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001320 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1321 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001322 hidl_vec<uint32_t> projectionWeightsDimensions{outputSize, numUnits};
1323 std::vector<float> projectionWeightsValue
1324 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001325 -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001326 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001327 -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
1328 -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001329 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
1330 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
1331 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
1332 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001333 -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
1334 -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
1335 -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001336 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
1337 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
1338 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
1339 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
1340 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001341 -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001342 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001343 -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001344 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001345 -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
1346 -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001347 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001348 -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001349 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001350 -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
1351 -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001352 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001353 -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
1354 -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
1355 -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001356 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
1357 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001358 -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001359 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
1360 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
1361 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
1362 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
1363 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001364 -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
1365 -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001366 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001367 -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
1368 -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001369 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
1370 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
1371 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001372 -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
1373 -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
1374 -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001375 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001376 -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001377 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
1378 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001379 -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
1380 -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
1381 -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001382 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001383 -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
1384 -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
1385 -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001386 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
1387 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
1388 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f
1389 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001390 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001391 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
1392 std::vector<float> projectionBiasValue(outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001393
1394 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001395 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1396 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001397 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001398 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1399 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001400
Matteo Martincighc7434122018-11-14 12:27:04 +00001401 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +01001402 // 20: The activation function: A value indicating the activation function:
1403 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +00001404 hidl_vec<uint32_t> activationFunctionDimensions{};
1405 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +01001406 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1407 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001408 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1409 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001410 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1411 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001412 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1413 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001414
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001415 // Normalization:
1416 // 23:The input layer normalization weights. A 1-D tensor of shape [num_units].
1417 // Used to rescale normalized inputs to activation at input gate.
1418 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
1419 std::vector<float> inputLayerNormWeightsValue;
1420 // 24:The forget layer normalization weights. A 1-D tensor of shape [num_units].
1421 // Used to rescale normalized inputs to activation at forget gate.
1422 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
1423 std::vector<float> forgetLayerNormWeightsValue;
1424 // 25:The cell layer normalization weights. A 1-D tensor of shape [num_units].
1425 // Used to rescale normalized inputs to activation at cell gate.
1426 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1427 std::vector<float> cellLayerNormWeightsValue;
1428 // 26:The output layer normalization weights. A 1-D tensor of shape [num_units].
1429 // Used to rescale normalized inputs to activation at output gate.
1430 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1431 std::vector<float> outputLayerNormWeightsValue;
1432
telsoa01ce3e84a2018-08-31 09:31:35 +01001433 // Outputs:
1434 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1435 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +00001436 // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
1437 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1438 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1439 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1440 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
1441 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001442 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001443 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1444 std::vector<float> outputStateOutValue
1445 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001446 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835577f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001447 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415119f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001448 -0.013869f, 0.0287268f, -0.00334694f, 0.00733397f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001449 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.0216801f
1450 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001451 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001452 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1453 std::vector<float> cellStateOutValue
1454 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001455 -0.0531632f, -0.0118138f, 0.0870833f, 0.0347929f, -0.076144f,
1456 -0.0659219f, -0.0463811f, 0.0141307f, -0.0127706f, -0.03782f,
1457 -0.00402401f, -0.00571876f, -0.187957f, -0.0247127f, 0.0711425f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001458 0.008244f, 0.0492649f, 0.126972f, 0.0933097f, 0.29848f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001459 -0.0966178f, -0.114417f, 0.0387229f, 0.0453255f, -0.181286f,
1460 -0.0651251f, -0.0996879f, -0.00276995f, 0.0617558f, -0.0100728f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001461 0.056304f, -0.077416f, -0.162858f, -0.0541251f, 0.0571202f,
1462 -0.0525331f, 0.0724297f, 0.171029f, 0.141738f, 0.295483f
1463 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001464 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1465 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +00001466 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1467 std::vector<float> outputValue
1468 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001469 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001470 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001471 -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001472 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f
1473 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001474
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001475 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1476 inputToInputWeightsDimensions, inputToInputWeightsValue,
1477 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1478 inputToCellWeightsDimensions, inputToCellWeightsValue,
1479 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1480 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1481 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1482 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1483 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1484 cellToInputWeightsDimensions, cellToInputWeightsValue,
1485 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1486 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1487 inputGateBiasDimensions, inputGateBiasValue,
1488 forgetGateBiasDimensions, forgetGateBiasValue,
1489 cellBiasDimensions, cellBiasValue,
1490 outputGateBiasDimensions, outputGateBiasValue,
1491 projectionWeightsDimensions, projectionWeightsValue,
1492 projectionBiasDimensions, projectionBiasValue,
1493 outputStateInDimensions, outputStateInValue,
1494 cellStateInDimensions, cellStateInValue,
1495 activationFunctionDimensions, activationFunctionValue,
1496 cellClippingThresholdDimensions, cellClippingThresholdValue,
1497 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1498 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1499 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1500 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1501 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1502 scratchBufferDimensions, scratchBufferValue,
1503 outputStateOutDimensions, outputStateOutValue,
1504 cellStateOutDimensions, cellStateOutValue,
1505 outputDimensions, outputValue,
1506 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +01001507}
1508
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001509template <typename HalPolicy>
Matteo Martincighc7434122018-11-14 12:27:04 +00001510void LstmCifgPeepholeNoProjectionBatch2(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +01001511{
1512 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
1513 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
1514 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
1515 // The batch size has been increased to 2 (it was 1 in the VTS test) with appropriate input and output values added.
1516
1517 uint32_t batchSize = 2;
1518 uint32_t inputSize = 2;
1519 uint32_t numUnits = 4;
1520 uint32_t outputSize = numUnits;
1521
1522 // Inputs:
1523 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1524 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +00001525 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1526 std::vector<float> inputValue{2.0f, 3.0f, 3.0f, 4.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001527
1528 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1529 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +00001530 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
1531 std::vector<float> inputToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001532 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1533 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001534 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
1535 std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
1536 0.13056988f, -0.36333650f,
1537 -0.22755712f, 0.28253698f,
1538 0.24407166f, 0.33826375f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001539 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001540 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
1541 std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
1542 -0.09624726f, 0.05100781f,
1543 0.04717243f, 0.48944736f,
1544 -0.38535351f, -0.17212132f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001545 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1546 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001547 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
1548 std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
1549 -0.55932593f, -0.09426838f,
1550 -0.44257352f, 0.54939759f,
1551 0.01533556f, 0.42751634f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001552 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1553 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1554 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +00001555 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
1556 std::vector<float> recurrentToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001557 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1558 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001559 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
1560 std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
1561 -0.14340827f, 0.36986142f, 0.23414481f, 0.55899000f,
1562 0.10798943f, -0.41174671f, 0.17751795f, -0.34484994f,
1563 -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001564 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1565 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001566 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
1567 std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
1568 0.42957711f, 0.01841056f, -0.32764608f, -0.33027974f,
1569 -0.10826075f, 0.20675004f, 0.19069612f, -0.03026325f,
1570 -0.54532051f, 0.33003211f, 0.44901288f, 0.21193194f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001571 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1572 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001573 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1574 std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
1575 0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
1576 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
1577 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001578 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001579 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
1580 std::vector<float> cellToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001581 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001582 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1583 std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001584 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001585 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1586 std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001587 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001588 hidl_vec<uint32_t> inputGateBiasDimensions{0}; // VTS was {4} -> {0} ?
1589 std::vector<float> inputGateBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001590 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001591 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1592 std::vector<float> forgetGateBiasValue{1.0f, 1.0f, 1.0f, 1.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001593 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001594 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1595 std::vector<float> cellBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001596 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001597 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1598 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001599 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1600 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001601 hidl_vec<uint32_t> projectionWeightsDimensions{0};
1602 std::vector<float> projectionWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001603 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001604 hidl_vec<uint32_t> projectionBiasDimensions{0};
1605 std::vector<float> projectionBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001606
1607 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001608 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1609 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001610 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001611 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1612 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001613
Matteo Martincighc7434122018-11-14 12:27:04 +00001614 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +01001615 // 20: The activation function: A value indicating the activation function:
1616 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +00001617 hidl_vec<uint32_t> activationFunctionDimensions{};
1618 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +01001619 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1620 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001621 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1622 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001623 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1624 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001625 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1626 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001627
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001628 // Normalization:
1629 // 23:The input layer normalization weights. A 1-D tensor of shape [num_units].
1630 // Used to rescale normalized inputs to activation at input gate.
1631 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
1632 std::vector<float> inputLayerNormWeightsValue;
1633 // 24:The forget layer normalization weights. A 1-D tensor of shape [num_units].
1634 // Used to rescale normalized inputs to activation at forget gate.
1635 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
1636 std::vector<float> forgetLayerNormWeightsValue;
1637 // 25:The cell layer normalization weights. A 1-D tensor of shape [num_units].
1638 // Used to rescale normalized inputs to activation at cell gate.
1639 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1640 std::vector<float> cellLayerNormWeightsValue;
1641 // 26:The output layer normalization weights. A 1-D tensor of shape [num_units].
1642 // Used to rescale normalized inputs to activation at output gate.
1643 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1644 std::vector<float> outputLayerNormWeightsValue;
1645
telsoa01ce3e84a2018-08-31 09:31:35 +01001646 // Outputs:
1647 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1648 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +00001649 // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
1650 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1651 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1652 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1653 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
1654 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001655 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001656 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1657 std::vector<float> outputStateOutValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1658 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001659 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001660 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1661 std::vector<float> cellStateOutValue{-0.76044439f, -0.01804161f, 0.18226376f, -0.06493707f,
1662 -0.90477051f, -0.04355603f, 0.18475688f, -0.04158677f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001663 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1664 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +00001665 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1666 std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1667 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001668
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001669 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1670 inputToInputWeightsDimensions, inputToInputWeightsValue,
1671 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1672 inputToCellWeightsDimensions, inputToCellWeightsValue,
1673 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1674 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1675 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1676 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1677 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1678 cellToInputWeightsDimensions, cellToInputWeightsValue,
1679 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1680 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1681 inputGateBiasDimensions, inputGateBiasValue,
1682 forgetGateBiasDimensions, forgetGateBiasValue,
1683 cellBiasDimensions, cellBiasValue,
1684 outputGateBiasDimensions, outputGateBiasValue,
1685 projectionWeightsDimensions, projectionWeightsValue,
1686 projectionBiasDimensions, projectionBiasValue,
1687 outputStateInDimensions, outputStateInValue,
1688 cellStateInDimensions, cellStateInValue,
1689 activationFunctionDimensions, activationFunctionValue,
1690 cellClippingThresholdDimensions, cellClippingThresholdValue,
1691 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1692 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1693 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1694 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1695 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1696 scratchBufferDimensions, scratchBufferValue,
1697 outputStateOutDimensions, outputStateOutValue,
1698 cellStateOutDimensions, cellStateOutValue,
1699 outputDimensions, outputValue,
1700 compute);
Matteo Martincighc7434122018-11-14 12:27:04 +00001701}
Matteo Martincighc7434122018-11-14 12:27:04 +00001702
template <typename HalPolicy>
// Exercises the full LSTM cell (no CIFG: all four gates present) with peephole
// connections, a projection layer and layer normalization enabled, and both the
// cell and projection clipping thresholds disabled, on the given backend.
// All weights/biases/scalars are baked in as CONSTANT_COPY operands and the
// result is checked against the golden output values below via LstmTestImpl.
void LstmNoCifgPeepholeProjectionNoClippingLayerNorm(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/layer_norm_lstm.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).

    uint32_t batchSize = 2;
    uint32_t inputSize = 5;
    uint32_t numUnits = 4;
    uint32_t outputSize = 3;

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     "batch_size" corresponds to the batching dimension, and "input_size" is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{ 0.7f, 0.8f, 0.1f, 0.2f, 0.3f,  // batch 0
                                   0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; // batch 1

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where "num_units" corresponds to the number of cell units.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToInputWeightsValue{ 0.5,  0.6,  0.7, -0.8, -0.9,
                                                 0.1,  0.2,  0.3, -0.4,  0.5,
                                                -0.8,  0.7, -0.6,  0.5, -0.4,
                                                -0.5, -0.4, -0.3, -0.2, -0.1};
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{-0.6, -0.1,  0.3,  0.2,  0.9,
                                                 -0.5, -0.2, -0.4,  0.3, -0.8,
                                                 -0.4,  0.3, -0.5, -0.4, -0.6,
                                                  0.3, -0.4, -0.6, -0.5, -0.5};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.4, -0.3, -0.2, -0.1, -0.5,
                                                0.5, -0.2, -0.3, -0.2, -0.6,
                                                0.6, -0.1, -0.4, -0.3, -0.7,
                                                0.7, -0.9, -0.5,  0.8,  0.6};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{-0.8, -0.4, -0.2, -0.9, -0.1,
                                                 -0.7,  0.3, -0.3, -0.8, -0.2,
                                                  0.6, -0.2,  0.4, -0.7, -0.3,
                                                 -0.5,  0.1,  0.5, -0.6, -0.4};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where "output_size" corresponds to either the number of cell units (i.e.,
    //     "num_units"), or the second dimension of the "projection_weights", if defined.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToInputWeightsValue{-0.2, -0.3,  0.4,
                                                     0.1, -0.5,  0.9,
                                                    -0.2, -0.3, -0.7,
                                                    0.05, -0.2, -0.6};
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.5, -0.3, -0.5,
                                                     -0.2,  0.6,  0.4,
                                                      0.9,  0.3, -0.1,
                                                      0.2,  0.5,  0.2};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{-0.3,  0.2,  0.1,
                                                   -0.3,  0.8,-0.08,
                                                   -0.2,  0.3,  0.8,
                                                   -0.6, -0.1,  0.2};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{ 0.3, -0.1,  0.1,
                                                     -0.2, -0.5, -0.7,
                                                     -0.2, -0.6, -0.1,
                                                     -0.4, -0.7, -0.2};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
    std::vector<float> cellToInputWeightsValue{0.05, 0.1, 0.25, 0.15};
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
    std::vector<float> cellToForgetWeightsValue{-0.02, -0.15, -0.25, -0.03};
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
    std::vector<float> cellToOutputWeightsValue{0.1, -0.1, -0.5, 0.05};
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
    std::vector<float> inputGateBiasValue{0.03, 0.15, 0.22, 0.38};
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue{0.1, -0.3, -0.2, 0.1};
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue{-0.05, 0.72, 0.25, 0.08};
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue{0.05, -0.01, 0.2, 0.1};
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    // NOTE(review): the spec comment above says [output_size, num_units] ({3, 4}) but the dimensions used
    // here are {numUnits, outputSize} ({4, 3}). This matches the generated test data being replicated —
    // confirm the transposed shape is intentional before changing it.
    hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
    std::vector<float> projectionWeightsValue{-0.1,  0.2, 0.01,
                                              -0.2,  0.1,  0.5,
                                               0.3, 0.08, 0.07,
                                               0.2, -0.4,  0.2};
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
    std::vector<float> projectionBiasValue(outputSize, 0.0f);
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4};
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f};

    // Normalization:
    // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at input gate.
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
    std::vector<float> inputLayerNormWeightsValue{0.1, 0.2, 0.3, 0.5};
    // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at forget gate.
    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
    std::vector<float> forgetLayerNormWeightsValue{0.2, 0.2, 0.4, 0.3};
    // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at cell gate.
    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
    std::vector<float> cellLayerNormWeightsValue{0.7, 0.2, 0.3, 0.8};
    // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at output gate.
    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
    std::vector<float> outputLayerNormWeightsValue{0.6, 0.2, 0.2, 0.5};

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    //    HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
    //    Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //              android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //              tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
    std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<float> outputStateOutValue { 0.02440767f, 0.12802738f, -0.00170918f,
                                            -0.00692428f, 0.08487406f,  0.06344498f};
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<float> cellStateOutValue {-0.45177122f, 0.37691566f, 0.22542511f, 0.23240635f,
                                          -0.25258583f, 0.33042118f, 0.01730525f, 0.36660123f};
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current "output state (out)" value.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<float> outputValue{ 0.02440767f, 0.12802738f, -0.00170918f,
                                   -0.00692428f, 0.08487406f,  0.06344498f};

    // Build the model, run it on 'compute' and compare against the golden outputs above.
    LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
                            inputToInputWeightsDimensions, inputToInputWeightsValue,
                            inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                            inputToCellWeightsDimensions, inputToCellWeightsValue,
                            inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                            recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                            recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                            recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                            recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                            cellToInputWeightsDimensions, cellToInputWeightsValue,
                            cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                            cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                            inputGateBiasDimensions, inputGateBiasValue,
                            forgetGateBiasDimensions, forgetGateBiasValue,
                            cellBiasDimensions, cellBiasValue,
                            outputGateBiasDimensions, outputGateBiasValue,
                            projectionWeightsDimensions, projectionWeightsValue,
                            projectionBiasDimensions, projectionBiasValue,
                            outputStateInDimensions, outputStateInValue,
                            cellStateInDimensions, cellStateInValue,
                            activationFunctionDimensions, activationFunctionValue,
                            cellClippingThresholdDimensions, cellClippingThresholdValue,
                            projectionClippingThresholdDimensions, projectionClippingThresholdValue,
                            inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
                            forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
                            cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
                            outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
                            scratchBufferDimensions, scratchBufferValue,
                            outputStateOutDimensions, outputStateOutValue,
                            cellStateOutDimensions, cellStateOutValue,
                            outputDimensions, outputValue,
                            compute);
}
1904
template <typename HalPolicy>
// CIFG variant of the layer-norm LSTM test: the optional input-gate operands
// (input-to-input / recurrent-to-input / cell-to-input weights and the input
// gate bias) are omitted — passed with dimension {0} and no data — while
// peephole, projection and layer normalization stay enabled and clipping is
// disabled. Note the scratch buffer shrinks to numUnits * 3 accordingly.
// All weights/biases/scalars are baked in as CONSTANT_COPY operands and the
// result is checked against the golden output values below via LstmTestImpl.
void LstmCifgPeepholeProjectionNoClippingLayerNorm(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/layer_norm_lstm.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).

    uint32_t batchSize = 2;
    uint32_t inputSize = 5;
    uint32_t numUnits = 4;
    uint32_t outputSize = 3;

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     "batch_size" corresponds to the batching dimension, and "input_size" is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{ 0.7f, 0.8f, 0.1f, 0.2f, 0.3f,  // batch 0
                                   0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; // batch 1

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where "num_units" corresponds to the number of cell units.
    //     Omitted here (dimension {0}, no data) to select the CIFG path.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
    std::vector<float> inputToInputWeightsValue;
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{-0.6, -0.1,  0.3,  0.2,  0.9,
                                                 -0.5, -0.2, -0.4,  0.3, -0.8,
                                                 -0.4,  0.3, -0.5, -0.4, -0.6,
                                                  0.3, -0.4, -0.6, -0.5, -0.5};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.4, -0.3, -0.2, -0.1, -0.5,
                                                0.5, -0.2, -0.3, -0.2, -0.6,
                                                0.6, -0.1, -0.4, -0.3, -0.7,
                                                0.7, -0.9, -0.5,  0.8,  0.6};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{-0.8, -0.4, -0.2, -0.9, -0.1,
                                                 -0.7,  0.3, -0.3, -0.8, -0.2,
                                                  0.6, -0.2,  0.4, -0.7, -0.3,
                                                 -0.5,  0.1,  0.5, -0.6, -0.4};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where "output_size" corresponds to either the number of cell units (i.e.,
    //     "num_units"), or the second dimension of the "projection_weights", if defined.
    //     Omitted here (dimension {0}, no data) to select the CIFG path.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0};
    std::vector<float> recurrentToInputWeightsValue;
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.5, -0.3, -0.5,
                                                     -0.2,  0.6,  0.4,
                                                      0.9,  0.3, -0.1,
                                                      0.2,  0.5,  0.2};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{-0.3,  0.2,  0.1,
                                                   -0.3,  0.8,-0.08,
                                                   -0.2,  0.3,  0.8,
                                                   -0.6, -0.1,  0.2};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{ 0.3, -0.1,  0.1,
                                                     -0.2, -0.5, -0.7,
                                                     -0.2, -0.6, -0.1,
                                                     -0.4, -0.7, -0.2};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Omitted here (dimension {0}, no data): no input gate in the CIFG variant.
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
    std::vector<float> cellToForgetWeightsValue{-0.02, -0.15, -0.25, -0.03};
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
    std::vector<float> cellToOutputWeightsValue{0.1, -0.1, -0.5, 0.05};
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Omitted here (dimension {0}, no data): no input gate in the CIFG variant.
    hidl_vec<uint32_t> inputGateBiasDimensions{0};
    std::vector<float> inputGateBiasValue;
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue{0.1, -0.3, -0.2, 0.1};
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue{-0.05, 0.72, 0.25, 0.08};
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue{0.05, -0.01, 0.2, 0.1};
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    // NOTE(review): the spec comment above says [output_size, num_units] ({3, 4}) but the dimensions used
    // here are {numUnits, outputSize} ({4, 3}). This matches the generated test data being replicated —
    // confirm the transposed shape is intentional before changing it.
    hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
    std::vector<float> projectionWeightsValue{-0.1,  0.2, 0.01,
                                              -0.2,  0.1,  0.5,
                                               0.3, 0.08, 0.07,
                                               0.2, -0.4,  0.2};
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
    std::vector<float> projectionBiasValue(outputSize, 0.0f);
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4};
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f};

    // Normalization:
    // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at input gate.
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
    std::vector<float> inputLayerNormWeightsValue{0.1, 0.2, 0.3, 0.5};
    // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at forget gate.
    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
    std::vector<float> forgetLayerNormWeightsValue{0.2, 0.2, 0.4, 0.3};
    // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at cell gate.
    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
    std::vector<float> cellLayerNormWeightsValue{0.7, 0.2, 0.3, 0.8};
    // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at output gate.
    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
    std::vector<float> outputLayerNormWeightsValue{0.6, 0.2, 0.2, 0.5};

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    //    HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
    //    Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //              android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //              tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
    std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<float> outputStateOutValue { 0.02129706f, 0.14081624f, 0.01127331f,
                                            -0.02263505f, 0.09169482f, 0.07691758f};
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<float> cellStateOutValue{-0.35102980f, 0.42610350f, 0.21463650f, 0.27716520f,
                                         -0.18855170f, 0.32522000f, 0.02036650f, 0.48967660f};
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current "output state (out)" value.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<float> outputValue{ 0.02129706f, 0.14081624f, 0.01127331f,
                                   -0.02263505f, 0.09169482f, 0.07691758f};

    // Build the model, run it on 'compute' and compare against the golden outputs above.
    LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
                            inputToInputWeightsDimensions, inputToInputWeightsValue,
                            inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                            inputToCellWeightsDimensions, inputToCellWeightsValue,
                            inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                            recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                            recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                            recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                            recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                            cellToInputWeightsDimensions, cellToInputWeightsValue,
                            cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                            cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                            inputGateBiasDimensions, inputGateBiasValue,
                            forgetGateBiasDimensions, forgetGateBiasValue,
                            cellBiasDimensions, cellBiasValue,
                            outputGateBiasDimensions, outputGateBiasValue,
                            projectionWeightsDimensions, projectionWeightsValue,
                            projectionBiasDimensions, projectionBiasValue,
                            outputStateInDimensions, outputStateInValue,
                            cellStateInDimensions, cellStateInValue,
                            activationFunctionDimensions, activationFunctionValue,
                            cellClippingThresholdDimensions, cellClippingThresholdValue,
                            projectionClippingThresholdDimensions, projectionClippingThresholdValue,
                            inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
                            forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
                            cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
                            outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
                            scratchBufferDimensions, scratchBufferValue,
                            outputStateOutDimensions, outputStateOutValue,
                            cellStateOutDimensions, cellStateOutValue,
                            outputDimensions, outputValue,
                            compute);
}