//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DriverTestHelpers.hpp"

#include <boost/array.hpp>
#include <boost/math/special_functions/relative_difference.hpp>

using ArmnnDriver = armnn_driver::ArmnnDriver;
using DriverOptions = armnn_driver::DriverOptions;

using namespace driverTestHelpers;
using namespace android::hardware;

namespace
{

template<typename T>
RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
{
    DataLocation inputInloc = {};
    inputInloc.poolIndex = poolIndex;
    inputInloc.offset = 0;
    inputInloc.length = value.size() * sizeof(T);
    RequestArgument inputRequestArgument = {};
    inputRequestArgument.location = inputInloc;
    inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
    return inputRequestArgument;
}

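// For illustration only (not part of the tests): a request argument for a float input living in
// memory pool 0 would typically be created along these lines; the vector name is hypothetical.
//     std::vector<float> exampleInput{1.0f, 2.0f, 3.0f};
//     RequestArgument arg = CreateRequestArgument<float>(exampleInput, 0);
// The argument records only the pool index and byte length; the actual data is added to the
// request separately via AddPoolAndSetData().
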
// Returns true if the relative difference between two float values is less than the tolerance value given.
// This is used because the floating point comparison tolerance (set on each BOOST_AUTO_TEST_CASE) does not work!
bool TolerantCompareEqual(float a, float b, float tolerance = 0.00001f)
{
    float rd;
    if (a == 0.0f)
    {
        rd = fabs(b);
    }
    else if (b == 0.0f)
    {
        rd = fabs(a);
    }
    else
    {
        rd = boost::math::relative_difference(a, b);
    }
    return rd < tolerance;
}

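// For illustration only: with the default tolerance of 1e-5,
//     TolerantCompareEqual(1.0f, 1.000001f);   // true  (relative difference ~1e-6)
//     TolerantCompareEqual(1.0f, 1.001f);      // false (relative difference ~1e-3)
// When either value is exactly zero, the absolute value of the other is compared against the
// tolerance instead, since the relative difference would otherwise be undefined.
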
// Helper function to create an OperandLifeTime::NO_VALUE for testing.
// To be used on optional input operands that have no values - these are valid and should be tested.
OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
{
    // Only create a NO_VALUE for optional operands that have no elements
    if (dimensions.size() == 0 || dimensions[0] == 0)
    {
        return OperandLifeTime::NO_VALUE;
    }
    return OperandLifeTime::CONSTANT_COPY;
}

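// For illustration only: the tests below encode "optional operand omitted" as an empty shape, so
//     CreateNoValueLifeTime(hidl_vec<uint32_t>{0});     // returns OperandLifeTime::NO_VALUE
//     CreateNoValueLifeTime(hidl_vec<uint32_t>{4, 2});  // returns OperandLifeTime::CONSTANT_COPY
// which lets the same AddTensorOperand call cover both the present and the omitted cases.
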
template<typename HalModel>
void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, const Request& request)
{
    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, driver);
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }
}

#ifdef ARMNN_ANDROID_NN_V1_2

template<>
void ExecuteModel<armnn_driver::hal_1_2::HalPolicy::Model>(const armnn_driver::hal_1_2::HalPolicy::Model& model,
                                                           armnn_driver::ArmnnDriver& driver,
                                                           const Request& request)
{
    android::sp<V1_2::IPreparedModel> preparedModel = PrepareModel_1_2(model, driver);
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }
}

#endif
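
// For illustration only: a test built on these helpers typically fills in a HalPolicy::Model with
// AddInputOperand/AddTensorOperand/AddOutputOperand, wraps the input and expected-output buffers in
// a Request, and then runs
//     ExecuteModel(model, *driver, request);
// The V1_2 specialisation above differs only in calling PrepareModel_1_2 for the newer HAL interface.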

} // anonymous namespace

#ifndef ARMCOMPUTECL_ENABLED
static const boost::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
#else
static const boost::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
#endif

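// For illustration only (the actual test-case wiring is assumed to live in the .cpp test files):
// the test implementations below take an armnn::Compute argument so that a caller can simply loop
// over COMPUTE_DEVICES, e.g.
//     for (armnn::Compute compute : COMPUTE_DEVICES)
//     {
//         LstmNoCifgNoPeepholeNoProjection<HalPolicy>(compute);
//     }
// exercising CpuRef always and GpuAcc only when ARMCOMPUTECL_ENABLED is defined.
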
// Add our own tests here, since we fail the LSTM tests that Google supplies (because of non-const weights)
template <typename HalPolicy>
void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                  const std::vector<float>& inputValue,
                  const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                  const std::vector<float>& inputToInputWeightsValue,
                  const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                  const std::vector<float>& inputToForgetWeightsValue,
                  const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                  const std::vector<float>& inputToCellWeightsValue,
                  const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                  const std::vector<float>& inputToOutputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                  const std::vector<float>& recurrentToInputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                  const std::vector<float>& recurrentToForgetWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                  const std::vector<float>& recurrentToCellWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                  const std::vector<float>& recurrentToOutputWeightsValue,
                  const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
                  const std::vector<float>& cellToInputWeightsValue,
                  const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
                  const std::vector<float>& cellToForgetWeightsValue,
                  const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
                  const std::vector<float>& cellToOutputWeightsValue,
                  const hidl_vec<uint32_t>& inputGateBiasDimensions,
                  const std::vector<float>& inputGateBiasValue,
                  const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                  const std::vector<float>& forgetGateBiasValue,
                  const hidl_vec<uint32_t>& cellBiasDimensions,
                  const std::vector<float>& cellBiasValue,
                  const hidl_vec<uint32_t>& outputGateBiasDimensions,
                  const std::vector<float>& outputGateBiasValue,
                  const hidl_vec<uint32_t>& projectionWeightsDimensions,
                  const std::vector<float>& projectionWeightsValue,
                  const hidl_vec<uint32_t>& projectionBiasDimensions,
                  const std::vector<float>& projectionBiasValue,
                  const hidl_vec<uint32_t>& outputStateInDimensions,
                  const std::vector<float>& outputStateInValue,
                  const hidl_vec<uint32_t>& cellStateInDimensions,
                  const std::vector<float>& cellStateInValue,
                  const hidl_vec<uint32_t>& activationFunctionDimensions,
                  const std::vector<int32_t>& activationFunctionValue,
                  const hidl_vec<uint32_t>& cellClippingThresholdDimensions,
                  const std::vector<float>& cellClippingThresholdValue,
                  const hidl_vec<uint32_t>& projectionClippingThresholdDimensions,
                  const std::vector<float>& projectionClippingThresholdValue,
                  const hidl_vec<uint32_t>& inputLayerNormWeightsDimensions,
                  const std::vector<float>& inputLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& forgetLayerNormWeightsDimensions,
                  const std::vector<float>& forgetLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& cellLayerNormWeightsDimensions,
                  const std::vector<float>& cellLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& outputLayerNormWeightsDimensions,
                  const std::vector<float>& outputLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& scratchBufferDimensions,
                  const std::vector<float>& scratchBufferValue,
                  const hidl_vec<uint32_t>& outputStateOutDimensions,
                  const std::vector<float>& outputStateOutValue,
                  const hidl_vec<uint32_t>& cellStateOutDimensions,
                  const std::vector<float>& cellStateOutValue,
                  const hidl_vec<uint32_t>& outputDimensions,
                  const std::vector<float>& outputValue,
                  armnn::Compute compute)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
    using Model = typename HalPolicy::Model;
    Model model = {};

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    AddInputOperand<HalPolicy>(model, inputDimensions);

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    AddTensorOperand<HalPolicy>(model,
                                inputToInputWeightsDimensions,
                                inputToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputToInputWeightsDimensions));
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToInputWeightsDimensions,
                                recurrentToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToInputWeightsDimensions,
                                cellToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToInputWeightsDimensions));
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToForgetWeightsDimensions,
                                cellToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToForgetWeightsDimensions));
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToOutputWeightsDimensions,
                                cellToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToOutputWeightsDimensions));
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                inputGateBiasDimensions,
                                inputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputGateBiasDimensions));
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, forgetGateBiasDimensions, forgetGateBiasValue);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, cellBiasDimensions, cellBiasValue);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, outputGateBiasDimensions, outputGateBiasValue);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    AddTensorOperand<HalPolicy>(model,
                                projectionWeightsDimensions,
                                projectionWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionWeightsDimensions));
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    AddTensorOperand<HalPolicy>(model,
                                projectionBiasDimensions,
                                projectionBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionBiasDimensions));

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddInputOperand<HalPolicy>(model, outputStateInDimensions);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddInputOperand<HalPolicy>(model, cellStateInDimensions);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    AddTensorOperand<HalPolicy>(model,
                                activationFunctionDimensions,
                                activationFunctionValue,
                                HalPolicy::OperandType::INT32);
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                cellClippingThresholdDimensions,
                                cellClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                projectionClippingThresholdDimensions,
                                projectionClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);

    bool normalizationEnabled = false;

    // If any of the layer normalization tensors have a value, all four normalization operands are added
    if (!inputLayerNormWeightsValue.empty() ||
        !forgetLayerNormWeightsValue.empty() ||
        !cellLayerNormWeightsValue.empty() ||
        !outputLayerNormWeightsValue.empty())
    {
        // Normalization:
        // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
        //     Used to rescale normalized inputs to activation at input gate.
        AddTensorOperand<HalPolicy>(model,
                                    inputLayerNormWeightsDimensions,
                                    inputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(inputLayerNormWeightsDimensions));
        // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
        //     Used to rescale normalized inputs to activation at forget gate.
        AddTensorOperand<HalPolicy>(model,
                                    forgetLayerNormWeightsDimensions,
                                    forgetLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(forgetLayerNormWeightsDimensions));
        // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
        //     Used to rescale normalized inputs to activation at cell gate.
        AddTensorOperand<HalPolicy>(model,
                                    cellLayerNormWeightsDimensions,
                                    cellLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(cellLayerNormWeightsDimensions));
        // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
        //     Used to rescale normalized inputs to activation at output gate.
        AddTensorOperand<HalPolicy>(model,
                                    outputLayerNormWeightsDimensions,
                                    outputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(outputLayerNormWeightsDimensions));

        normalizationEnabled = true;
    }

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    AddOutputOperand<HalPolicy>(model, scratchBufferDimensions);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddOutputOperand<HalPolicy>(model, outputStateOutDimensions);
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddOutputOperand<HalPolicy>(model, cellStateOutDimensions);
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    AddOutputOperand<HalPolicy>(model, outputDimensions);

    // make the lstm operation
    model.operations.resize(1);
    model.operations[0].type = HalPolicy::OperationType::LSTM;

    if (normalizationEnabled)
    {
        model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
                                                          14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26};
        model.operations[0].outputs = hidl_vec<uint32_t> {27, 28, 29, 30};
    }
    else
    {
        model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                                                          12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
        model.operations[0].outputs = hidl_vec<uint32_t> {23, 24, 25, 26};
    }

    // define the input values
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    inputArguments[0] = CreateRequestArgument<float>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<float>(outputStateInValue, 1);
    inputArguments[2] = CreateRequestArgument<float>(cellStateInValue, 2);

    // define the expected output values
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(4);

    outputArguments[0] = CreateRequestArgument<float>(scratchBufferValue, 3);
    outputArguments[1] = CreateRequestArgument<float>(outputStateOutValue, 4);
    outputArguments[2] = CreateRequestArgument<float>(cellStateOutValue, 5);
    outputArguments[3] = CreateRequestArgument<float>(outputValue, 6);

    Request request = {};
    request.inputs = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(outputStateInValue.size(), request, outputStateInValue.data());
    AddPoolAndSetData(cellStateInValue.size(), request, cellStateInValue.data());

    // add memory for the outputs
    AddPoolAndGetData<float>(scratchBufferValue.size(), request);
    android::sp<IMemory> outputStateOutMemory = AddPoolAndGetData<float>(outputStateOutValue.size(), request);
    float* outputStateOutData = static_cast<float*>(static_cast<void*>(outputStateOutMemory->getPointer()));
    android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData<float>(cellStateOutValue.size(), request);
    float* cellStateOutData = static_cast<float*>(static_cast<void*>(cellStateOutMemory->getPointer()));
    android::sp<IMemory> outputMemory = AddPoolAndGetData<float>(outputValue.size(), request);
    float* outputData = static_cast<float*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    ExecuteModel(model, *driver, request);

    // check the results
    for (size_t i = 0; i < outputStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
                   "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
    }
    for (size_t i = 0; i < cellStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]),
                   "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
    }
    for (size_t i = 0; i < outputValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
    }
}

template <typename HalPolicy>
void QuantizedLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                           const std::vector<uint8_t>& inputValue,
                           const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                           const std::vector<uint8_t>& inputToInputWeightsValue,
                           const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                           const std::vector<uint8_t>& inputToForgetWeightsValue,
                           const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                           const std::vector<uint8_t>& inputToCellWeightsValue,
                           const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                           const std::vector<uint8_t>& inputToOutputWeightsValue,
                           const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                           const std::vector<uint8_t>& recurrentToInputWeightsValue,
                           const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                           const std::vector<uint8_t>& recurrentToForgetWeightsValue,
                           const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                           const std::vector<uint8_t>& recurrentToCellWeightsValue,
                           const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                           const std::vector<uint8_t>& recurrentToOutputWeightsValue,
                           const hidl_vec<uint32_t>& inputGateBiasDimensions,
                           const std::vector<int32_t>& inputGateBiasValue,
                           const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                           const std::vector<int32_t>& forgetGateBiasValue,
                           const hidl_vec<uint32_t>& cellBiasDimensions,
                           const std::vector<int32_t>& cellBiasValue,
                           const hidl_vec<uint32_t>& outputGateBiasDimensions,
                           const std::vector<int32_t>& outputGateBiasValue,
                           const hidl_vec<uint32_t>& previousOutputInDimensions,
                           const std::vector<uint8_t>& previousOutputInValue,
                           const hidl_vec<uint32_t>& previousCellStateInDimensions,
                           const std::vector<int16_t>& previousCellStateInValue,
                           const hidl_vec<uint32_t>& cellStateOutDimensions,
                           const std::vector<int16_t>& cellStateOutValue,
                           const hidl_vec<uint32_t>& outputDimensions,
                           const std::vector<uint8_t>& outputValue)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::GpuAcc));
    using Model = typename HalPolicy::Model;
    Model model = {};

    float inputOutputScale = 0.0078125f;
    int32_t inputOutputOffset = 128;

    float cellStateScale = 0.00048828125f;
    int32_t cellStateOffset = 0;

    float weightsScale = 0.00408021f;
    int32_t weightsOffset = 100;

    float biasScale = 3.1876640625e-05f;
    int32_t biasOffset = 0;

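    // Note on the quantization parameters above (illustrative explanation, not part of the original test):
    // a quantized value q represents the real value  real = scale * (q - zeroPoint).
    //   - inputOutputScale = 1/128 with zeroPoint 128 gives the fixed range [-1, 127/128].
    //   - cellStateScale = 1/2048 with zeroPoint 0 gives the symmetric int16 range [-2^4, 2^4 * 32767/32768].
    //   - biasScale = inputOutputScale * weightsScale (0.0078125 * 0.00408021), matching the requirement below
    //     that the bias scale be the product of the input and weights scales, with zeroPoint 0.
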
    // Inputs:
    // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
    //    specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
    AddInputOperand<HalPolicy>(model,
                               inputDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                               inputOutputScale,
                               inputOutputOffset);

    // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                inputToInputWeightsDimensions,
                                inputToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(inputToInputWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                inputToForgetWeightsDimensions,
                                inputToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(inputToForgetWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                inputToCellWeightsDimensions,
                                inputToCellWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(inputToCellWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                inputToOutputWeightsDimensions,
                                inputToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(inputToOutputWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToInputWeightsDimensions,
                                recurrentToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(recurrentToInputWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToForgetWeightsDimensions,
                                recurrentToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(recurrentToForgetWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToCellWeightsDimensions,
                                recurrentToCellWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(recurrentToCellWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToOutputWeightsDimensions,
                                recurrentToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(recurrentToOutputWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
    //    bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //    of input and weights scales and zeroPoint equal to 0.
    AddTensorOperand<HalPolicy>(model,
                                inputGateBiasDimensions,
                                inputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(inputGateBiasDimensions),
                                biasScale,
                                biasOffset);
    // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    AddTensorOperand<HalPolicy>(model,
                                forgetGateBiasDimensions,
                                forgetGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(forgetGateBiasDimensions),
                                biasScale,
                                biasOffset);
    // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
    //     for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
    //     and weights scales and zeroPoint equal to 0.
    AddTensorOperand<HalPolicy>(model,
                                cellBiasDimensions,
                                cellBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(cellBiasDimensions),
                                biasScale,
                                biasOffset);
    // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    AddTensorOperand<HalPolicy>(model,
                                outputGateBiasDimensions,
                                outputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(outputGateBiasDimensions),
                                biasScale,
                                biasOffset);

    // 13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
    //     [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
    //     It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
    AddInputOperand<HalPolicy>(model,
                               previousCellStateInDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                               cellStateScale,
                               cellStateOffset);
    // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //     [numBatches, outputSize] specifying the output of the LSTM cell from previous time-step. Tensor
    //     is quantized with a fixed quantization range of -1, 127/128.
    AddInputOperand<HalPolicy>(model,
                               previousOutputInDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                               inputOutputScale,
                               inputOutputOffset);

    // Outputs:
    // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
    //    which contains a cell state from the current time step. Tensor is quantized using a quantization range
    //    of -2^4, 2^4 * 32767/32768.
    AddOutputOperand<HalPolicy>(model,
                                cellStateOutDimensions,
                                HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                cellStateScale,
                                cellStateOffset);
    // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
    //    contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
    AddOutputOperand<HalPolicy>(model,
                                outputDimensions,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                inputOutputScale,
                                inputOutputOffset);

    // make the lstm operation
    model.operations.resize(1);
    model.operations[0].type = HalPolicy::OperationType::QUANTIZED_16BIT_LSTM;

    model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7,
                                                      8, 9, 10, 11, 12, 13, 14};
    model.operations[0].outputs = hidl_vec<uint32_t> {15, 16};

    // define the input values
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    inputArguments[0] = CreateRequestArgument<uint8_t>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<int16_t>(previousCellStateInValue, 1);
    inputArguments[2] = CreateRequestArgument<uint8_t>(previousOutputInValue, 2);

    // define the expected output values
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(2);

    outputArguments[0] = CreateRequestArgument<int16_t>(cellStateOutValue, 3);
    outputArguments[1] = CreateRequestArgument<uint8_t>(outputValue, 4);

    Request request = {};
    request.inputs = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(previousCellStateInValue.size(), request, previousCellStateInValue.data());
    AddPoolAndSetData(previousOutputInValue.size(), request, previousOutputInValue.data());

    // add memory for the outputs
    android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData<int16_t>(cellStateOutValue.size(), request);
    int16_t* cellStateOutData = static_cast<int16_t*>(static_cast<void*>(cellStateOutMemory->getPointer()));
    android::sp<IMemory> outputMemory = AddPoolAndGetData<uint8_t>(outputValue.size(), request);
    uint8_t* outputData = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    ExecuteModel(model, *driver, request);

    // check the results
    for (size_t i = 0; i < cellStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i], 1.0f),
                   "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
    }
    for (size_t i = 0; i < outputValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i], 1.0f),
                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
    }
}

template <typename HalPolicy>
void LstmNoCifgNoPeepholeNoProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).

    uint32_t batchSize = 1;
    uint32_t inputSize = 2;
    uint32_t numUnits = 4;
    uint32_t outputSize = numUnits;

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{2.0f, 3.0f};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToInputWeightsValue{-0.45018822f, -0.02338299f,
                                                -0.08705890f, -0.34550029f,
                                                 0.04266912f, -0.15680569f,
                                                -0.34856534f,  0.43890524f};
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{ 0.09701663f,  0.20334584f,
                                                 -0.50592935f, -0.31343272f,
                                                 -0.40032279f,  0.44781327f,
                                                  0.01387155f, -0.35593212f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.50013041f,  0.13702840f,
                                                0.11810488f,  0.20131630f,
                                               -0.20583314f,  0.44344562f,
                                                0.22077113f, -0.29909778f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{-0.25065863f, -0.28290087f,
                                                  0.04613829f,  0.40525138f,
                                                  0.44272184f,  0.03897077f,
                                                 -0.15568960f,  0.19487578f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToInputWeightsValue{-0.00635350f, -0.20423880f,  0.31454784f, -0.35746509f,
                                                     0.28902304f,  0.08183324f, -0.16555229f,  0.02286911f,
                                                    -0.13566875f,  0.03034258f,  0.48091322f, -0.12528998f,
                                                     0.24077177f, -0.51332325f, -0.33502164f,  0.10629296f};
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.48684245f, -0.06655136f,  0.42224967f,  0.21126390f,
                                                      0.27654213f,  0.20864892f, -0.07646349f,  0.45877004f,
                                                      0.00141793f, -0.14609534f,  0.36447752f,  0.09196436f,
                                                      0.28053468f,  0.01560611f, -0.20127171f, -0.01140004f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{-0.34074140f,  0.24443203f, -0.20785320f,  0.26320225f,
                                                    0.05695659f, -0.00123841f, -0.47447860f, -0.35869038f,
                                                   -0.06418842f, -0.13502428f, -0.50176400f,  0.22830659f,
                                                   -0.46367589f,  0.26016325f, -0.03894562f, -0.16368064f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{ 0.43385774f, -0.17194885f,  0.27182370f,  0.09215671f,
                                                      0.24107647f, -0.39835793f,  0.18212086f,  0.01301402f,
                                                      0.48572797f, -0.50656658f,  0.20047462f, -0.20607421f,
                                                     -0.51818722f, -0.15390486f,  0.04681480f,  0.39922136f};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<float> cellToForgetWeightsValue;
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<float> cellToOutputWeightsValue;
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
    std::vector<float> inputGateBiasValue(numUnits, 0.0f);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue(numUnits, 0.0f);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue(numUnits, 0.0f);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    hidl_vec<uint32_t> projectionWeightsDimensions{0};
    std::vector<float> projectionWeightsValue;
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<float> projectionBiasValue;

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4};
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f};

    // Normalization:
    // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at input gate.
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
    std::vector<float> inputLayerNormWeightsValue;
    // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at forget gate.
    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
    std::vector<float> forgetLayerNormWeightsValue;
    // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at cell gate.
    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
    std::vector<float> cellLayerNormWeightsValue;
    // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at output gate.
    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
    std::vector<float> outputLayerNormWeightsValue;

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    //    HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
    //    Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //              android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //              tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
    std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<float> outputStateOutValue {-0.0297319f, 0.122947f, 0.208851f, -0.153588f};
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<float> cellStateOutValue {-0.145439f, 0.157475f, 0.293663f, -0.277353f};
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<float> outputValue {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f};

    LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
                            inputToInputWeightsDimensions, inputToInputWeightsValue,
                            inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                            inputToCellWeightsDimensions, inputToCellWeightsValue,
                            inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                            recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                            recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                            recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                            recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                            cellToInputWeightsDimensions, cellToInputWeightsValue,
                            cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                            cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                            inputGateBiasDimensions, inputGateBiasValue,
                            forgetGateBiasDimensions, forgetGateBiasValue,
                            cellBiasDimensions, cellBiasValue,
                            outputGateBiasDimensions, outputGateBiasValue,
                            projectionWeightsDimensions, projectionWeightsValue,
                            projectionBiasDimensions, projectionBiasValue,
                            outputStateInDimensions, outputStateInValue,
                            cellStateInDimensions, cellStateInValue,
                            activationFunctionDimensions, activationFunctionValue,
                            cellClippingThresholdDimensions, cellClippingThresholdValue,
                            projectionClippingThresholdDimensions, projectionClippingThresholdValue,
                            inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
                            forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
                            cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
                            outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
                            scratchBufferDimensions, scratchBufferValue,
                            outputStateOutDimensions, outputStateOutValue,
                            cellStateOutDimensions, cellStateOutValue,
                            outputDimensions, outputValue,
                            compute);
}

Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100870template <typename HalPolicy>
Matteo Martincighc7434122018-11-14 12:27:04 +0000871void LstmCifgPeepholeNoProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +0100872{
873 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
874 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
875 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
876
Matteo Martincighc7434122018-11-14 12:27:04 +0000877 uint32_t batchSize = 1;
878 uint32_t inputSize = 2;
879 uint32_t numUnits = 4;
880 uint32_t outputSize = numUnits;
881
telsoa01ce3e84a2018-08-31 09:31:35 +0100882 // Inputs:
883 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
884 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +0000885 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
886 std::vector<float> inputValue{2.0f, 3.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100887
888 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
889 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +0000890 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
891 std::vector<float> inputToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100892 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
893 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000894 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
895 std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
896 0.13056988f, -0.36333650f,
897 -0.22755712f, 0.28253698f,
898 0.24407166f, 0.33826375f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100899 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000900 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
901 std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
902 -0.09624726f, 0.05100781f,
903 0.04717243f, 0.48944736f,
904 -0.38535351f, -0.17212132f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100905 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
906 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000907 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
908 std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
909 -0.55932593f, -0.09426838f,
910 -0.44257352f, 0.54939759f,
911 0.01533556f, 0.42751634f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100912 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
913 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
914 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +0000915 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
916 std::vector<float> recurrentToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100917 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
918 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000919 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
920 std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
921 -0.14340827f, 0.36986142f, 0.23414481f, 0.55899000f,
922 0.10798943f, -0.41174671f, 0.17751795f, -0.34484994f,
923 -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100924 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
925 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000926 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
927 std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
928 0.42957711f, 0.01841056f, -0.32764608f, -0.33027974f,
929 -0.10826075f, 0.20675004f, 0.19069612f, -0.03026325f,
930 -0.54532051f, 0.33003211f, 0.44901288f, 0.21193194f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100931 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
932 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000933 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
934 std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
935 0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
936 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
937 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100938 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000939 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
940 std::vector<float> cellToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100941 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000942 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
943 std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100944 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000945 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
946 std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100947 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000948 hidl_vec<uint32_t> inputGateBiasDimensions{0}; // The VTS model declares this as {4}; passed empty here since this CIFG test has no input gate
949 std::vector<float> inputGateBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100950 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000951 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
952 std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100953 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000954 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
955 std::vector<float> cellBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100956 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000957 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
958 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100959 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
960 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000961 hidl_vec<uint32_t> projectionWeightsDimensions{0};
962 std::vector<float> projectionWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100963 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000964 hidl_vec<uint32_t> projectionBiasDimensions{0};
965 std::vector<float> projectionBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100966
967 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000968 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
969 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100970 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000971 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
972 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100973
Matteo Martincighc7434122018-11-14 12:27:04 +0000974 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +0100975 // 20: The activation function: A value indicating the activation function:
976 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +0000977 hidl_vec<uint32_t> activationFunctionDimensions{};
978 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +0100979 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
980 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000981 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
982 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100983 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
984 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000985 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
986 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100987
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100988 // Normalization:
989 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
990 // Used to rescale normalized inputs to activation at input gate.
991 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
992 std::vector<float> inputLayerNormWeightsValue;
993 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
994 // Used to rescale normalized inputs to activation at forget gate.
995 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
996 std::vector<float> forgetLayerNormWeightsValue;
997 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
998 // Used to rescale normalized inputs to activation at cell gate.
999 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1000 std::vector<float> cellLayerNormWeightsValue;
1001 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1002 // Used to rescale normalized inputs to activation at output gate.
1003 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1004 std::vector<float> outputLayerNormWeightsValue;
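    // Note: layer normalization is not exercised by this test, so operands 23-26 above are passed as empty tensors.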
1005
telsoa01ce3e84a2018-08-31 09:31:35 +01001006 // Outputs:
1007 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1008 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +00001009 // HOWEVER, looking at the code, it appears to be the opposite: (cifg ? 3 : 4) * numUnits
1010 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1011 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1012 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
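    // This test omits the input gate tensors (CIFG path), so the scratch buffer below is sized with
    // numUnits * 3 (here: batchSize (1) x numUnits (4) * 3, i.e. a 1 x 12 buffer).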
1013 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
1014 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001015 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001016 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1017 std::vector<float> outputStateOutValue{-0.364445f, -0.00352185f, 0.128866f, -0.0516365f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001018 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001019 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1020 std::vector<float> cellStateOutValue{-0.760444f, -0.0180416f, 0.182264f, -0.0649371f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001021 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1022 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +00001023 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1024 std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001025
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001026 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1027 inputToInputWeightsDimensions, inputToInputWeightsValue,
1028 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1029 inputToCellWeightsDimensions, inputToCellWeightsValue,
1030 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1031 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1032 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1033 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1034 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1035 cellToInputWeightsDimensions, cellToInputWeightsValue,
1036 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1037 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1038 inputGateBiasDimensions, inputGateBiasValue,
1039 forgetGateBiasDimensions, forgetGateBiasValue,
1040 cellBiasDimensions, cellBiasValue,
1041 outputGateBiasDimensions, outputGateBiasValue,
1042 projectionWeightsDimensions, projectionWeightsValue,
1043 projectionBiasDimensions, projectionBiasValue,
1044 outputStateInDimensions, outputStateInValue,
1045 cellStateInDimensions, cellStateInValue,
1046 activationFunctionDimensions, activationFunctionValue,
1047 cellClippingThresholdDimensions, cellClippingThresholdValue,
1048 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1049 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1050 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1051 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1052 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1053 scratchBufferDimensions, scratchBufferValue,
1054 outputStateOutDimensions, outputStateOutValue,
1055 cellStateOutDimensions, cellStateOutValue,
1056 outputDimensions, outputValue,
1057 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +01001058}
1059
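// The following test exercises the full LSTM path: the input gate is present (no CIFG), peephole connections
// are used, and a projection layer is defined, so all of the optional weight and bias tensors are populated.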
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001060template <typename HalPolicy>
Matteo Martincighc7434122018-11-14 12:27:04 +00001061void LstmNoCifgPeepholeProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +01001062{
1063 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm3.model.cpp
1064 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm3.example.cpp
1065 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
1066
Matteo Martincighc7434122018-11-14 12:27:04 +00001067 uint32_t batchSize = 2;
1068 uint32_t inputSize = 5;
1069 uint32_t numUnits = 20;
1070 uint32_t outputSize = 16;
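    // Note: this model uses a projection layer, so outputSize (16) differs from numUnits (20).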
1071
telsoa01ce3e84a2018-08-31 09:31:35 +01001072 // Inputs:
1073 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1074 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +00001075 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1076 std::vector<float> inputValue{0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
1077 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001078
1079 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1080 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +00001081 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
1082 std::vector<float> inputToInputWeightsValue
1083 {
1084 0.0213936830f, 0.0612455100f, 0.0469051670f, -0.0146576770f, -0.0314946300f,
1085 0.0917180300f, 0.1464780100f, 0.1079719300f, -0.0057968358f, 0.0019193048f,
1086 -0.2726754000f, 0.1015402900f, -0.0185398850f, 0.0803498850f, -0.1026238500f,
1087 -0.0225997870f, -0.0912115500f, -0.0086759670f, -0.0452061030f, -0.0821282000f,
1088 -0.0080459520f, 0.0154780810f, 0.0552172470f, 0.0387195870f, 0.0441536270f,
1089 -0.0645324300f, 0.0503182500f, -0.0469351080f, -0.0081644309f, 0.0145742260f,
1090 -0.1671009000f, -0.1551955200f, -0.1681979700f, -0.1397126900f, -0.1195305900f,
1091 0.2500548700f, -0.2279098300f, 0.0098550870f, -0.0281409580f, -0.1120069800f,
1092 0.1129540800f, -0.0035217577f, 0.0544850750f, 0.0518469500f, 0.0647112060f,
1093 0.1098919300f, 0.1167478600f, 0.0349060700f, 0.0772735700f, 0.1139058500f,
1094 -0.1863375000f, -0.1034451000f, -0.1394518900f, -0.0494012270f, -0.1876706300f,
1095 0.0424839030f, 0.1423355200f, 0.1383258100f, 0.1835016500f, 0.1454560300f,
1096 -0.0285457040f, 0.0249395310f, 0.0509297180f, 0.0076203286f, -0.0029723682f,
1097 -0.0424842240f, -0.1182759600f, -0.0917110400f, -0.1080862800f, -0.1632798800f,
1098 -0.2273378000f, -0.0993647000f, -0.0171551070f, 0.0023917493f, 0.0492727640f,
1099 0.0038534778f, 0.0547645050f, 0.0897537840f, 0.0694723400f, 0.0801447600f,
1100 -0.0454423400f, -0.0497073000f, -0.0713563100f, -0.0489291060f, -0.0040420120f,
1101 -0.0092840260f, 0.0180420540f, 0.0036860977f, -0.0742730200f, -0.1143460400f,
1102 -0.0189954560f, 0.0314875430f, 0.0128349080f, 0.0199777540f, 0.0442566540f,
1103 -0.3929261300f, -0.1851933400f, -0.1165128100f, -0.0680989200f, 0.0113736770f
1104 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001105 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1106 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001107 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
1108 std::vector<float> inputToForgetWeightsValue
1109 {
1110 -0.0018401089f, -0.0048522370f, 0.0369842400f, 0.0141817040f, 0.0282732360f,
1111 -0.0167261940f, -0.0524975900f, -0.1020426100f, 0.0086106600f, -0.0409795050f,
1112 -0.0098991870f, 0.0192389200f, -0.0281772690f, -0.0853510300f, -0.1458549500f,
1113 0.1066256700f, -0.0190973100f, -0.0178835340f, -0.0047269356f, -0.0451033230f,
1114 0.0030784295f, 0.0767847750f, 0.0746369600f, 0.0945313950f, 0.0814421000f,
1115 -0.1225789900f, -0.0339457580f, -0.0313034650f, 0.0456306260f, 0.0684388700f,
1116 -0.1349294500f, -0.0124800070f, -0.0811829000f, -0.0722449900f, -0.0962879100f,
1117 0.0451009460f, 0.0012300825f, 0.0139646620f, 0.0993723940f, 0.0254305900f,
1118 0.0695832400f, 0.0342572960f, 0.0482646000f, 0.0626799700f, 0.0526250680f,
1119 0.1278466600f, 0.0707789700f, 0.0257259350f, 0.0416500900f, 0.0724190500f,
1120 0.0186686440f, -0.0373772940f, -0.0627778300f, -0.0883363600f, -0.0401206050f,
1121 -0.0114055860f, -0.0078083350f, -0.0103013860f, -0.0051021670f, 0.0277174640f,
1122 0.0548342300f, 0.1144911100f, 0.1128965200f, 0.1093983900f, 0.1339650600f,
1123 -0.0840216600f, -0.0190146200f, -0.0446783040f, -0.0772056500f, 0.0143500630f,
1124 -0.1175795800f, -0.0652038000f, -0.0818573300f, -0.0767543240f, -0.0926143750f,
1125 0.1040549100f, 0.0529603360f, 0.0357558950f, 0.0358393860f, -0.0125405530f,
1126 0.0368812980f, 0.0291337600f, 0.0342015900f, 0.0544844700f, -0.0545233530f,
1127 0.0258271500f, 0.0232735500f, -0.0118571790f, -0.0011980024f, -0.0346417170f,
1128 -0.0261250940f, -0.1758261500f, -0.1592365700f, -0.2748677400f, -0.0006143371f,
1129 0.0001771948f, -8.470171e-05f, 0.0265180700f, 0.0457907650f, 0.069564960f
1130 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001131 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001132 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
1133 std::vector<float> inputToCellWeightsValue
1134 {
1135 -0.0458028300f, -0.0954946200f, -0.0324189850f, -0.0645463300f, -0.0435284530f,
1136 0.0430185870f, -0.0491523440f, -0.1241814400f, -0.0789854750f, -0.0759688900f,
1137 0.0194843620f, -0.1143496200f, -0.0074034138f, -0.0631484400f, -0.0929814950f,
1138 0.0062155537f, -0.0250343380f, -0.0028890965f, 0.0489295270f, 0.0623507500f,
1139 0.1066591800f, -0.0320367920f, -0.0850591600f, -0.1084335800f, -0.1300243300f,
1140 -0.0368164370f, -0.0213013400f, -0.0165182390f, 0.0047691227f, -0.0025825808f,
1141 0.0660178660f, 0.0299915340f, -0.1065283600f, -0.1037554000f, -0.1305607100f,
1142 -0.0326664300f, -0.0337024140f, -0.0064734240f, -0.0461169200f, 0.0144193390f,
1143 -0.0251743230f, 0.0396852000f, 0.0817775060f, 0.0615746800f, 0.1021009500f,
1144 -0.0096581940f, 0.0465117170f, 0.0360390600f, 0.0069369148f, 0.0159600950f,
1145 -0.0650766600f, 0.0955159800f, 0.0535688360f, 0.0640871400f, 0.1283566700f,
1146 -0.0087143290f, -0.2021196600f, -0.1209367400f, 0.0294504720f, 0.2849013000f,
1147 -0.0292279010f, 0.1164364000f, -0.0856026300f, 0.0994178600f, -0.0369995650f,
1148 -0.0288426260f, -0.0033637602f, -0.0170129020f, -0.0972086500f, -0.1119335100f,
1149 -0.0291551170f, -0.0179360340f, -0.0097689360f, -0.0422332400f, -0.0361596350f,
1150 0.0650511200f, -0.0217428920f, -0.0233772120f, -0.0722136400f, -0.0643055200f,
1151 0.0545386500f, 0.0911498140f, 0.0638733100f, 0.0075183930f, 0.0559609530f,
1152 0.0697793440f, 0.0464111680f, 0.1050991100f, 0.0746389400f, 0.0075130584f,
1153 0.0128509820f, 0.0455543100f, 0.0569556880f, 0.0655528500f, 0.0508014560f,
1154 -0.0098626830f, 0.0082677200f, -0.0265556090f, -0.0073611983f, -0.0014897042f
1155 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001156 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1157 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001158 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
1159 std::vector<float> inputToOutputWeightsValue
1160 {
1161 -0.0998932000f, -0.0720195600f, -0.0528037730f, -0.1562959300f, -0.1500191800f,
1162 -0.0765075100f, 0.0235985500f, -0.0751553550f, -0.0803770900f, -0.1509353400f,
1163 0.0295175520f, -0.0475139300f, 0.0103505310f, -0.0266485100f, -0.0168397220f,
1164 -0.0231211630f, 0.0077019283f, 0.0128512570f, -0.0504064900f, -0.0129761000f,
1165 -0.0217377470f, -0.0383057930f, -0.0687058600f, -0.0148124700f, -0.0012853940f,
1166 0.1012423600f, 0.0831228350f, 0.0533130060f, -0.0622356460f, -0.0756371540f,
1167 -0.0278339030f, 0.0297749710f, 0.1130802000f, 0.0921890600f, 0.0950613500f,
1168 -0.0866657640f, -0.0371627060f, -0.0388809140f, -0.0358328450f, -0.0144815640f,
1169 -0.0982500300f, -0.1204856900f, -0.0976655860f, -0.0528763300f, -0.0964047000f,
1170 -0.1136642900f, 0.0357775050f, 0.1356881900f, 0.0524513830f, 0.0506493040f,
1171 0.0579895100f, -0.0218523350f, -0.0998488440f, 0.0147404750f, -0.0788979460f,
1172 0.0497469900f, 0.0141604730f, 0.0697393200f, 0.0496494200f, 0.0333646460f,
1173 0.0819012400f, 0.0255353670f, 0.0508931650f, 0.0485142540f, 0.0694581300f,
1174 -0.0789075640f, -0.0670761600f, -0.1184450800f, -0.0998668800f, -0.0750940300f,
1175 0.0626322600f, 0.1492558700f, 0.2018843600f, 0.1209845100f, 0.1463941500f,
1176 0.0015017595f, -0.0142673820f, -0.0341725700f, 0.0127114680f, 0.0028300495f,
1177 -0.0247584820f, -0.0509854800f, -0.0821182000f, 0.0142256720f, 0.0215441580f,
1178 0.0894972500f, 0.0750526800f, -0.0020780868f, 0.0490825800f, 0.0647629500f,
1179 -0.0229070630f, 0.0275624560f, 0.0401857350f, 0.0195675770f, -0.0155987390f,
1180 -0.0490973030f, -0.0171218660f, -0.0833682340f, -0.0233200200f, -0.084095600f
1181 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001182 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1183 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1184 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +00001185 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
1186 std::vector<float> recurrentToInputWeightsValue
1187 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001188 -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f, // 00
1189 -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
1190 -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
1191 -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001192 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f, // 01
1193 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001194 -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001195 0.14283475f, -0.07390571f, -0.06402044f, 0.062524505f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001196 -0.093129106f, 0.04860203f, -0.08364217f, -0.08119002f, // 02
Matteo Martincighc7434122018-11-14 12:27:04 +00001197 0.009352075f, 0.22920375f, 0.0016303885f, 0.11583097f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001198 -0.13732095f, 0.012405723f, -0.07551853f, 0.06343048f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001199 0.12162708f, -0.031923793f, -0.014335606f, 0.01790974f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001200 -0.10650317f, -0.0724401f, 0.08554849f, -0.05727212f, // 03
Matteo Martincighc7434122018-11-14 12:27:04 +00001201 0.06556731f, -0.042729504f, -0.043227166f, 0.011683251f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001202 -0.013082158f, -0.029302018f, -0.010899579f, -0.062036745f,
1203 -0.022509435f, -0.00964907f, -0.01567329f, 0.04260106f,
1204 -0.07787477f, -0.11576462f, 0.017356863f, 0.048673786f, // 04
1205 -0.017577527f, -0.05527947f, -0.082487635f, -0.040137455f,
1206 -0.10820036f, -0.04666372f, 0.022746278f, -0.07851417f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001207 0.01068115f, 0.032956902f, 0.022433773f, 0.0026891115f,
1208 0.08944216f, -0.0685835f, 0.010513544f, 0.07228705f, // 05
1209 0.02032331f, -0.059686817f, -0.0005566496f, -0.086984694f,
1210 0.040414046f, -0.1380399f, 0.094208956f, -0.05722982f,
1211 0.012092817f, -0.04989123f, -0.086576f, -0.003399834f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001212 -0.04696032f, -0.045747425f, 0.10091314f, 0.048676282f, // 06
1213 -0.029037097f, 0.031399418f, -0.0040285117f, 0.047237843f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001214 0.09504992f, 0.041799378f, -0.049185462f, -0.031518843f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001215 -0.10516937f, 0.026374253f, 0.10058866f, -0.0033195973f,
1216 -0.041975245f, 0.0073591834f, 0.0033782164f, -0.004325073f, // 07
1217 -0.10167381f, 0.042500053f, -0.01447153f, 0.06464186f,
1218 -0.017142897f, 0.03312627f, 0.009205989f, 0.024138335f,
1219 -0.011337001f, 0.035530265f, -0.010912711f, 0.0706555f,
1220 -0.005894094f, 0.051841937f, -0.1401738f, -0.02351249f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +00001221 0.0365468f, 0.07590991f, 0.08838724f, 0.021681072f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001222 -0.10086113f, 0.019608743f, -0.06195883f, 0.077335775f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001223 0.023646897f, -0.095322326f, 0.02233014f, 0.09756986f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001224 -0.048691444f, -0.009579111f, 0.07595467f, 0.11480546f, // 09
1225 -0.09801813f, 0.019894179f, 0.08502348f, 0.004032281f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001226 0.037211012f, 0.068537936f, -0.048005626f, -0.091520436f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001227 -0.028379958f, -0.01556313f, 0.06554592f, -0.045599163f,
1228 -0.01672207f, -0.020169014f, -0.011877351f, -0.20212261f, // 10
Matteo Martincighc7434122018-11-14 12:27:04 +00001229 0.010889619f, 0.0047078193f, 0.038385306f, 0.08540671f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001230 -0.017140968f, -0.0035865551f, 0.016678626f, 0.005633034f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001231 0.015963363f, 0.00871737f, 0.060130805f, 0.028611384f,
1232 0.10109069f, -0.015060172f, -0.07894427f, 0.06401885f, // 11
1233 0.011584063f, -0.024466386f, 0.0047652307f, -0.09041358f,
1234 0.030737216f, -0.0046374933f, 0.14215417f, -0.11823516f,
1235 0.019899689f, 0.006106124f, -0.027092824f, 0.0786356f,
1236 0.05052217f, -0.058925f, -0.011402121f, -0.024987547f, // 12
telsoa01ce3e84a2018-08-31 09:31:35 +01001237 -0.0013661642f, -0.06832946f, -0.015667673f, -0.1083353f,
1238 -0.00096863037f, -0.06988685f, -0.053350925f, -0.027275559f,
1239 -0.033664223f, -0.07978348f, -0.025200296f, -0.017207067f,
1240 -0.058403496f, -0.055697463f, 0.005798788f, 0.12965427f, // 13
1241 -0.062582195f, 0.0013350133f, -0.10482091f, 0.0379771f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001242 0.072521195f, -0.0029455067f, -0.13797039f, -0.03628521f,
1243 0.013806405f, -0.017858358f, -0.01008298f, -0.07700066f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001244 -0.017081132f, 0.019358726f, 0.0027079724f, 0.004635139f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +00001245 0.062634714f, -0.02338735f, -0.039547626f, -0.02050681f,
1246 0.03385117f, -0.083611414f, 0.002862572f, -0.09421313f,
1247 0.058618143f, -0.08598433f, 0.00972939f, 0.023867095f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001248 -0.053934585f, -0.023203006f, 0.07452513f, -0.048767887f, // 15
1249 -0.07314807f, -0.056307215f, -0.10433547f, -0.06440842f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001250 0.04328182f, 0.04389765f, -0.020006588f, -0.09076438f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001251 -0.11652589f, -0.021705797f, 0.03345259f, -0.010329105f,
1252 -0.025767034f, 0.013057034f, -0.07316461f, -0.10145612f, // 16
Matteo Martincighc7434122018-11-14 12:27:04 +00001253 0.06358255f, 0.18531723f, 0.07759293f, 0.12006465f,
1254 0.1305557f, 0.058638252f, -0.03393652f, 0.09622831f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001255 -0.16253184f, -2.4580743e-06f, 0.079869635f, -0.070196845f,
1256 -0.005644518f, 0.06857898f, -0.12598175f, -0.035084512f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +00001257 0.03156317f, -0.12794146f, -0.031963028f, 0.04692781f,
1258 0.030070418f, 0.0071660685f, -0.095516115f, -0.004643372f,
1259 0.040170413f, -0.062104587f, -0.0037324072f, 0.0554317f,
1260 0.08184801f, -0.019164372f, 0.06791302f, 0.034257166f, // 18
telsoa01ce3e84a2018-08-31 09:31:35 +01001261 -0.10307039f, 0.021943003f, 0.046745934f, 0.0790918f,
1262 -0.0265588f, -0.007824208f, 0.042546265f, -0.00977924f,
1263 -0.0002440307f, -0.017384544f, -0.017990116f, 0.12252321f,
1264 -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f, // 19
Matteo Martincighc7434122018-11-14 12:27:04 +00001265 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001266 -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001267 -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f
1268 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001269 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1270 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001271 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
1272 std::vector<float> recurrentToForgetWeightsValue
1273 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001274 -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f, // 00
Matteo Martincighc7434122018-11-14 12:27:04 +00001275 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001276 -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001277 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
1278 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f, // 01
telsoa01ce3e84a2018-08-31 09:31:35 +01001279 -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
1280 -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001281 0.061878487f, -0.04729229f, 0.034919553f, -0.07585433f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001282 -0.04421272f, -0.044019096f, 0.085488975f, 0.04058006f, // 02
1283 -0.06890133f, -0.030951202f, -0.024628663f, -0.07672815f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001284 0.034293607f, 0.08556707f, -0.05293577f, -0.033561368f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001285 -0.04899627f, 0.0241671f, 0.015736353f, -0.095442444f,
1286 -0.029564252f, 0.016493602f, -0.035026584f, 0.022337519f, // 03
1287 -0.026871363f, 0.004780428f, 0.0077918363f, -0.03601621f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001288 0.016435321f, -0.03263031f, -0.09543275f, -0.047392778f,
1289 0.013454138f, 0.028934088f, 0.01685226f, -0.086110644f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001290 -0.046250615f, -0.01847454f, 0.047608484f, 0.07339695f, // 04
Matteo Martincighc7434122018-11-14 12:27:04 +00001291 0.034546845f, -0.04881143f, 0.009128804f, -0.08802852f,
1292 0.03761666f, 0.008096139f, -0.014454086f, 0.014361001f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001293 -0.023502491f, -0.0011840804f, -0.07607001f, 0.001856849f,
1294 -0.06509276f, -0.006021153f, -0.08570962f, -0.1451793f, // 05
Matteo Martincighc7434122018-11-14 12:27:04 +00001295 0.060212336f, 0.055259194f, 0.06974018f, 0.049454916f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001296 -0.027794661f, -0.08077226f, -0.016179763f, 0.1169753f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001297 0.17213494f, -0.0056326236f, -0.053934924f, -0.0124349f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001298 -0.11520337f, 0.05409887f, 0.088759385f, 0.0019655675f, // 06
Matteo Martincighc7434122018-11-14 12:27:04 +00001299 0.0042065294f, 0.03881498f, 0.019844765f, 0.041858196f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001300 -0.05695512f, 0.047233116f, 0.038937137f, -0.06542224f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001301 0.014429736f, -0.09719407f, 0.13908425f, -0.05379757f,
1302 0.012321099f, 0.082840554f, -0.029899208f, 0.044217527f, // 07
1303 0.059855383f, 0.07711018f, -0.045319796f, 0.0948846f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001304 -0.011724666f, -0.0033288454f, -0.033542685f, -0.04764985f,
1305 -0.13873616f, 0.040668588f, 0.034832682f, -0.015319203f,
1306 -0.018715994f, 0.046002675f, 0.0599172f, -0.043107376f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +00001307 0.0294216f, -0.002314414f, -0.022424703f, 0.0030315618f,
1308 0.0014641669f, 0.0029166266f, -0.11878115f, 0.013738511f,
1309 0.12375372f, -0.0006038222f, 0.029104086f, 0.087442465f,
1310 0.052958444f, 0.07558703f, 0.04817258f, 0.044462286f, // 09
telsoa01ce3e84a2018-08-31 09:31:35 +01001311 -0.015213451f, -0.08783778f, -0.0561384f, -0.003008196f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001312 0.047060397f, -0.002058388f, 0.03429439f, -0.018839769f,
1313 0.024734668f, 0.024614193f, -0.042046934f, 0.09597743f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001314 -0.0043254104f, 0.04320769f, 0.0064070094f, -0.0019131786f, // 10
1315 -0.02558259f, -0.022822596f, -0.023273505f, -0.02464396f,
1316 -0.10991725f, -0.006240552f, 0.0074488563f, 0.024044557f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001317 0.04383914f, -0.046476185f, 0.028658995f, 0.060410924f,
1318 0.050786525f, 0.009452605f, -0.0073054377f, -0.024810238f, // 11
1319 0.0052906186f, 0.0066939713f, -0.0020913032f, 0.014515517f,
1320 0.015898481f, 0.021362653f, -0.030262267f, 0.016587038f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001321 -0.011442813f, 0.041154444f, -0.007631438f, -0.03423484f,
1322 -0.010977775f, 0.036152758f, 0.0066366293f, 0.11915515f, // 12
Matteo Martincighc7434122018-11-14 12:27:04 +00001323 0.02318443f, -0.041350313f, 0.021485701f, -0.10906167f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001324 -0.028218046f, -0.00954771f, 0.020531068f, -0.11995105f,
1325 -0.03672871f, 0.024019798f, 0.014255957f, -0.05221243f,
1326 -0.00661567f, -0.04630967f, 0.033188973f, 0.10107534f, // 13
1327 -0.014027541f, 0.030796422f, -0.10270911f, -0.035999842f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001328 0.15443139f, 0.07684145f, 0.036571592f, -0.035900835f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001329 -0.0034699554f, 0.06209149f, 0.015920248f, -0.031122351f,
1330 -0.03858649f, 0.01849943f, 0.13872518f, 0.01503974f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +00001331 0.069941424f, -0.06948533f, -0.0088794185f, 0.061282158f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001332 -0.047401894f, 0.03100163f, -0.041533746f, -0.10430945f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001333 0.044574402f, -0.01425562f, -0.024290353f, 0.034563623f,
1334 0.05866852f, 0.023947537f, -0.09445152f, 0.035450947f, // 15
1335 0.02247216f, -0.0042998926f, 0.061146557f, -0.10250651f,
1336 0.020881841f, -0.06747029f, 0.10062043f, -0.0023941975f,
1337 0.03532124f, -0.016341697f, 0.09685456f, -0.016764693f,
1338 0.051808182f, 0.05875331f, -0.04536488f, 0.001626336f, // 16
telsoa01ce3e84a2018-08-31 09:31:35 +01001339 -0.028892258f, -0.01048663f, -0.009793449f, -0.017093895f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001340 0.010987891f, 0.02357273f, -0.00010856845f, 0.0099760275f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001341 -0.001845119f, -0.03551521f, 0.0018358806f, 0.05763657f,
1342 -0.01769146f, 0.040995963f, 0.02235177f, -0.060430344f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +00001343 0.11475477f, -0.023854522f, 0.10071741f, 0.0686208f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001344 -0.014250481f, 0.034261297f, 0.047418304f, 0.08562733f,
1345 -0.030519066f, 0.0060542435f, 0.014653856f, -0.038836084f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001346 0.04096551f, 0.032249358f, -0.08355519f, -0.026823482f, // 18
1347 0.056386515f, -0.010401743f, -0.028396193f, 0.08507674f,
1348 0.014410365f, 0.020995233f, 0.17040324f, 0.11511526f,
1349 0.02459721f, 0.0066619175f, 0.025853224f, -0.023133837f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001350 -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f, // 19
1351 -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001352 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f,
1353 -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f
1354 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001355 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1356 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001357 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
1358 std::vector<float> recurrentToCellWeightsValue
1359 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001360 -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001361 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
1362 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001363 -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001364 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
1365 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001366 -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
1367 -0.019443132f, -0.030755889f, -0.0040000007f, 0.04465846f,
1368 -0.021585021f, 0.0031670958f, 0.0053199246f, -0.056117613f,
1369 -0.10893326f, 0.076739706f, -0.08509834f, -0.027997585f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001370 0.037871376f, 0.01449768f, -0.09002357f, -0.06111149f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001371 -0.046195522f, 0.0422062f, -0.005683705f, -0.1253618f,
1372 -0.012925729f, -0.04890792f, 0.06985068f, 0.037654128f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001373 0.03398274f, -0.004781977f, 0.007032333f, -0.031787455f,
1374 0.010868644f, -0.031489216f, 0.09525667f, 0.013939797f,
1375 0.0058680447f, 0.0167067f, 0.02668468f, -0.04797466f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001376 -0.048885044f, -0.12722108f, 0.035304096f, 0.06554885f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001377 0.00972396f, -0.039238118f, -0.05159735f, -0.11329045f,
1378 0.1613692f, -0.03750952f, 0.06529313f, -0.071974665f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001379 -0.11769596f, 0.015524369f, -0.0013754242f, -0.12446318f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001380 0.02786344f, -0.014179351f, 0.005264273f, 0.14376344f,
1381 0.015983658f, 0.03406988f, -0.06939408f, 0.040699873f,
1382 0.02111075f, 0.09669095f, 0.041345075f, -0.08316494f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001383 -0.07684199f, -0.045768797f, 0.032298047f, -0.041805092f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001384 0.0119405f, 0.0061010392f, 0.12652606f, 0.0064572375f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001385 -0.024950314f, 0.11574242f, 0.04508852f, -0.04335324f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001386 0.06760663f, -0.027437469f, 0.07216407f, 0.06977076f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001387 -0.05438599f, 0.034033038f, -0.028602652f, 0.05346137f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001388 0.043184172f, -0.037189785f, 0.10420091f, 0.00882477f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001389 -0.054019816f, -0.074273005f, -0.030617684f, -0.0028467078f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001390 0.024302477f, -0.0038869337f, 0.005332455f, 0.0013399826f,
1391 0.04361412f, -0.007001822f, 0.09631092f, -0.06702025f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001392 -0.042049985f, -0.035070654f, -0.04103342f, -0.10273396f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001393 0.0544271f, 0.037184782f, -0.13150354f, -0.0058036847f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001394 -0.008264958f, 0.042035464f, 0.05891794f, 0.029673764f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001395 0.0063542654f, 0.044788733f, 0.054816857f, 0.062257513f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001396 -0.00093483756f, 0.048938446f, -0.004952862f, -0.007730018f,
1397 -0.04043371f, -0.017094059f, 0.07229206f, -0.023670016f,
1398 -0.052195564f, -0.025616996f, -0.01520939f, 0.045104615f,
1399 -0.007376126f, 0.003533447f, 0.006570588f, 0.056037236f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001400 0.12436656f, 0.051817212f, 0.028532185f, -0.08686856f,
1401 0.11868599f, 0.07663395f, -0.07323171f, 0.03463402f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001402 -0.050708205f, -0.04458982f, -0.11590894f, 0.021273347f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001403 0.1251325f, -0.15313013f, -0.12224372f, 0.17228661f,
1404 0.023029093f, 0.086124025f, 0.006445803f, -0.03496501f,
1405 0.028332196f, 0.04449512f, -0.042436164f, -0.026587414f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001406 -0.006041347f, -0.09292539f, -0.05678812f, 0.03897832f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001407 0.09465633f, 0.008115513f, -0.02171956f, 0.08304309f,
1408 0.071401566f, 0.019622514f, 0.032163795f, -0.004167056f,
1409 0.02295182f, 0.030739572f, 0.056506045f, 0.004612461f,
1410 0.06524936f, 0.059999723f, 0.046395954f, -0.0045512207f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001411 -0.1335546f, -0.030136576f, 0.11584653f, -0.014678886f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001412 0.0020118146f, -0.09688814f, -0.0790206f, 0.039770417f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001413 -0.0329582f, 0.07922767f, 0.029322514f, 0.026405897f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001414 0.04207835f, -0.07073373f, 0.063781224f, 0.0859677f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001415 -0.10925287f, -0.07011058f, 0.048005477f, 0.03438226f,
1416 -0.09606514f, -0.006669445f, -0.043381985f, 0.04240257f,
1417 -0.06955775f, -0.06769346f, 0.043903265f, -0.026784198f,
1418 -0.017840602f, 0.024307009f, -0.040079936f, -0.019946516f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001419 0.045318738f, -0.12233574f, 0.026170589f, 0.0074471775f,
1420 0.15978073f, 0.10185836f, 0.10298046f, -0.015476589f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001421 -0.039390966f, -0.072174534f, 0.0739445f, -0.1211869f,
1422 -0.0347889f, -0.07943156f, 0.014809798f, -0.12412325f,
1423 -0.0030663363f, 0.039695457f, 0.0647603f, -0.08291318f,
1424 -0.018529687f, -0.004423833f, 0.0037507233f, 0.084633216f,
1425 -0.01514876f, -0.056505352f, -0.012800942f, -0.06994386f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001426 0.012962922f, -0.031234352f, 0.07029052f, 0.016418684f,
1427 0.03618972f, 0.055686004f, -0.08663945f, -0.017404709f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001428 -0.054761406f, 0.029065743f, 0.052404847f, 0.020238016f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001429 0.0048197987f, -0.0214882f, 0.07078733f, 0.013016777f,
1430 0.06262858f, 0.009184685f, 0.020785125f, -0.043904778f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001431 -0.0270329f, -0.03299152f, -0.060088247f, -0.015162964f,
1432 -0.001828936f, 0.12642565f, -0.056757294f, 0.013586685f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001433 0.09232601f, -0.035886683f, 0.06000002f, 0.05229691f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001434 -0.052580316f, -0.082029596f, -0.010794592f, 0.012947712f,
1435 -0.036429964f, -0.085508935f, -0.13127148f, -0.017744139f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001436 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001437 -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f,
1438 -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001439 -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f
1440 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001441 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1442 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001443 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1444 std::vector<float> recurrentToOutputWeightsValue
1445 {
1446 0.025825322f, -0.05813119f, 0.09495884f, -0.045984812f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001447 -0.01255415f, -0.0026479573f, -0.08196161f, -0.054914974f,
1448 -0.0046604523f, -0.029587349f, -0.044576716f, -0.07480124f,
1449 -0.082868785f, 0.023254942f, 0.027502948f, -0.0039728214f,
1450 -0.08683098f, -0.08116779f, -0.014675607f, -0.037924774f,
1451 -0.023314456f, -0.007401714f, -0.09255757f, 0.029460307f,
1452 -0.08829125f, -0.005139627f, -0.08989442f, -0.0555066f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001453 0.13596267f, -0.025062224f, -0.048351806f, -0.03850004f,
1454 0.07266485f, -0.022414139f, 0.05940088f, 0.075114764f,
1455 0.09597592f, -0.010211725f, -0.0049794707f, -0.011523867f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001456 -0.025980417f, 0.072999895f, 0.11091378f, -0.081685916f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001457 0.014416728f, 0.043229222f, 0.034178585f, -0.07530371f,
1458 0.035837382f, -0.085607f, -0.007721233f, -0.03287832f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001459 -0.043848954f, -0.06404588f, -0.06632928f, -0.073643476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001460 0.008214239f, -0.045984086f, 0.039764922f, 0.03474462f,
1461 0.060612556f, -0.080590084f, 0.049127717f, 0.04151091f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001462 -0.030063879f, 0.008801774f, -0.023021035f, -0.019558564f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001463 0.05158114f, -0.010947698f, -0.011825728f, 0.0075720972f,
1464 0.0699727f, -0.0039981045f, 0.069350146f, 0.08799282f,
1465 0.016156472f, 0.035502106f, 0.11695009f, 0.006217345f,
1466 0.13392477f, -0.037875112f, 0.025745004f, 0.08940699f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001467 -0.00924166f, 0.0046702605f, -0.036598757f, -0.08811812f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001468 0.10522024f, -0.032441203f, 0.008176899f, -0.04454919f,
1469 0.07058152f, 0.0067963637f, 0.039206743f, 0.03259838f,
1470 0.03725492f, -0.09515802f, 0.013326398f, -0.052055415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001471 -0.025676316f, 0.03198509f, -0.015951829f, -0.058556724f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001472 0.036879618f, 0.043357447f, 0.028362012f, -0.05908629f,
1473 0.0059240665f, -0.04995891f, -0.019187413f, 0.0276265f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001474 -0.01628143f, 0.0025863599f, 0.08800015f, 0.035250366f,
1475 -0.022165963f, -0.07328642f, -0.009415526f, -0.07455109f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001476 0.11690406f, 0.0363299f, 0.07411125f, 0.042103454f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001477 -0.009660886f, 0.019076364f, 0.018299393f, -0.046004917f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001478 0.08891175f, 0.0431396f, -0.026327137f, -0.051502608f,
1479 0.08979574f, -0.051670972f, 0.04940282f, -0.07491107f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001480 -0.021240504f, 0.022596184f, -0.034280192f, 0.060163025f,
1481 -0.058211457f, -0.051837247f, -0.01349775f, -0.04639988f,
1482 -0.035936575f, -0.011681591f, 0.064818054f, 0.0073146066f,
1483 -0.021745546f, -0.043124277f, -0.06471268f, -0.07053354f,
1484 -0.029321948f, -0.05330136f, 0.016933719f, -0.053782392f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001485 0.13747959f, -0.1361751f, -0.11569455f, 0.0033329215f,
1486 0.05693899f, -0.053219706f, 0.063698f, 0.07977434f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001487 -0.07924483f, 0.06936997f, 0.0034815092f, -0.007305279f,
1488 -0.037325785f, -0.07251102f, -0.033633437f, -0.08677009f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001489 0.091591336f, -0.14165086f, 0.021752775f, 0.019683983f,
1490 0.0011612234f, -0.058154266f, 0.049996935f, 0.0288841f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001491 -0.0024567875f, -0.14345716f, 0.010955264f, -0.10234828f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001492 0.1183656f, -0.0010731248f, -0.023590032f, -0.072285876f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001493 -0.0724771f, -0.026382286f, -0.0014920527f, 0.042667855f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001494 0.0018776858f, 0.02986552f, 0.009814309f, 0.0733756f,
1495 0.12289186f, 0.018043943f, -0.0458958f, 0.049412545f,
1496 0.033632483f, 0.05495232f, 0.036686596f, -0.013781798f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001497 -0.010036754f, 0.02576849f, -0.08307328f, 0.010112348f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001498 0.042521734f, -0.05869831f, -0.071689695f, 0.03876447f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001499 -0.13275425f, -0.0352966f, -0.023077697f, 0.10285965f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001500 0.084736146f, 0.15568255f, -0.00040734606f, 0.027835453f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001501 -0.10292561f, -0.032401145f, 0.10053256f, -0.026142767f,
1502 -0.08271222f, -0.0030240538f, -0.016368777f, 0.1070414f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001503 0.042672627f, 0.013456989f, -0.0437609f, -0.022309763f,
1504 0.11576483f, 0.04108048f, 0.061026827f, -0.0190714f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001505 -0.0869359f, 0.037901703f, 0.0610107f, 0.07202949f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001506 0.01675338f, 0.086139716f, -0.08795751f, -0.014898893f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001507 -0.023771819f, -0.01965048f, 0.007955471f, -0.043740474f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001508 0.03346837f, -0.10549954f, 0.090567775f, 0.042013682f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001509 -0.03176985f, 0.12569028f, -0.02421228f, -0.029526481f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001510 0.023851605f, 0.031539805f, 0.05292009f, -0.02344001f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001511 -0.07811758f, -0.08834428f, 0.10094801f, 0.16594367f,
1512 -0.06861939f, -0.021256343f, -0.041093912f, -0.06669611f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001513 0.035498552f, 0.021757556f, -0.09302526f, -0.015403468f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001514 -0.06614931f, -0.051798206f, -0.013874718f, 0.03630673f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001515 0.010412845f, -0.08077351f, 0.046185967f, 0.0035662893f,
1516 0.03541868f, -0.094149634f, -0.034814864f, 0.003128424f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001517 -0.020674974f, -0.03944324f, -0.008110165f, -0.11113267f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001518 0.08484226f, 0.043586485f, 0.040582247f, 0.0968012f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001519 -0.065249965f, -0.028036479f, 0.0050708856f, 0.0017462453f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001520 0.0326779f, 0.041296225f, 0.09164146f, -0.047743853f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001521 -0.015952192f, -0.034451712f, 0.084197424f, -0.05347844f,
1522 -0.11768019f, 0.085926116f, -0.08251791f, -0.045081906f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001523 0.0948852f, 0.068401024f, 0.024856757f, 0.06978981f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001524 -0.057309967f, -0.012775832f, -0.0032452994f, 0.01977615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001525 -0.041040014f, -0.024264973f, 0.063464895f, 0.05431621f
1526 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001527 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001528 hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
1529 std::vector<float> cellToInputWeightsValue
1530 {
1531 0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
1532 -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
1533 -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f, -0.052169047f,
1534 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f
1535 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001536 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001537 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1538 std::vector<float> cellToForgetWeightsValue
1539 {
1540 -0.01998659f, -0.15568835f, -0.24248174f, -0.012770197f, 0.041331276f,
1541 -0.072311886f, -0.052123554f, -0.0066330447f, -0.043891653f, 0.036225766f,
1542 -0.047248036f, 0.021479502f, 0.033189066f, 0.11952997f, -0.020432774f,
1543 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f
1544 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001545 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001546 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1547 std::vector<float> cellToOutputWeightsValue
1548 {
1549 0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
1550 -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
1551 -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
1552 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f
1553 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001554 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001555 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
1556 std::vector<float> inputGateBiasValue
1557 {
1558 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
1559 -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
1560 -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
1561 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f
1562 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001563 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001564 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1565 std::vector<float> forgetGateBiasValue
1566 {
1567 0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f, 0.11098921f,
1568 0.15378423f, 0.09263801f, 0.09790885f, 0.09508917f, 0.061199076f,
1569 0.07665568f, -0.015443159f, -0.03499149f, 0.046190713f, 0.08895977f,
1570 0.10899629f, 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f
1571 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001572 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001573 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1574 std::vector<float> cellBiasValue
1575 {
1576 -0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f, -0.1483596f,
1577 -0.10639995f, -0.091433935f, 0.058573797f, -0.06809782f, -0.07889636f,
1578 -0.043246906f, -0.09829136f, -0.4279842f, 0.034901652f, 0.18797937f,
1579 0.0075234566f, 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f
1580 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001581 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001582 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1583 std::vector<float> outputGateBiasValue
1584 {
1585 0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
1586 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
1587 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
1588 -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f
1589 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001590 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1591 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001592 hidl_vec<uint32_t> projectionWeightsDimensions{outputSize, numUnits};
1593 std::vector<float> projectionWeightsValue
1594 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001595 -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001596 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001597 -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
1598 -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001599 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
1600 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
1601 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
1602 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001603 -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
1604 -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
1605 -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001606 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
1607 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
1608 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
1609 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
1610 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001611 -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001612 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001613 -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001614 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001615 -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
1616 -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001617 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001618 -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001619 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001620 -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
1621 -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001622 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001623 -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
1624 -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
1625 -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001626 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
1627 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001628 -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001629 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
1630 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
1631 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
1632 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
1633 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001634 -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
1635 -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001636 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001637 -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
1638 -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001639 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
1640 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
1641 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001642 -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
1643 -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
1644 -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001645 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001646 -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001647 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
1648 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001649 -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
1650 -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
1651 -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001652 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001653 -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
1654 -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
1655 -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001656 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
1657 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
1658 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f
1659 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001660 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001661 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
1662 std::vector<float> projectionBiasValue(outputSize, 0.0f);
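    // Rough sketch of the projection stage (the usual LSTM-with-projection formulation, stated
    // here for reference rather than taken from this file): the [output_size, num_units] weights
    // map the num_units-wide gated cell output h down to output_size values,
    //     output[i] = dot(projectionWeights[i], h) + projectionBias[i]
    // optionally clipped by the projection clipping threshold (operand 22 below).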
telsoa01ce3e84a2018-08-31 09:31:35 +01001663
1664 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001665 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1666 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001667 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001668 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1669 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001670
Matteo Martincighc7434122018-11-14 12:27:04 +00001671 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +01001672 // 20: The activation function: A value indicating the activation function:
1673 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +00001674 hidl_vec<uint32_t> activationFunctionDimensions{};
1675 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +01001676 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1677 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001678 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1679 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001680 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1681 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001682 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1683 std::vector<float> projectionClippingThresholdValue{0.0f};
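    // Clipping sketch (standard formulation, for reference rather than taken from this file):
    // when a threshold t is non-zero, each affected value x is bounded as
    //     x = std::min(std::max(x, -t), t);
    // Both thresholds are 0.0f in this test, so neither the cell state nor the projection
    // output is clipped.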
telsoa01ce3e84a2018-08-31 09:31:35 +01001684
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001685 // Normalization:
1686 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
1687 // Used to rescale normalized inputs to activation at input gate.
1688 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
1689 std::vector<float> inputLayerNormWeightsValue;
1690 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1691 // Used to rescale normalized inputs to activation at forget gate.
1692 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
1693 std::vector<float> forgetLayerNormWeightsValue;
1694 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1695 // Used to rescale normalized inputs to activation at cell gate.
1696 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1697 std::vector<float> cellLayerNormWeightsValue;
1698 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1699 // Used to rescale normalized inputs to activation at output gate.
1700 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1701 std::vector<float> outputLayerNormWeightsValue;
1702
telsoa01ce3e84a2018-08-31 09:31:35 +01001703 // Outputs:
1704 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1705 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +00001706 // HOWEVER, judging by the code, it appears to be the opposite: (cifg ? 3 : 4) * numUnits
1707 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1708 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1709 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1710 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
1711 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
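    // Assuming the (cifg ? 3 : 4) * numUnits sizing noted above: this variant keeps the input
    // gate, so four gate buffers are allocated per batch entry, which is why numUnits * 4 is
    // used for both the dimensions and the zero-initialized values.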
telsoa01ce3e84a2018-08-31 09:31:35 +01001712 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001713 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1714 std::vector<float> outputStateOutValue
1715 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001716 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835577f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001717 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415119f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001718 -0.013869f, 0.0287268f, -0.00334694f, 0.00733397f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001719 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.0216801f
1720 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001721 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001722 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1723 std::vector<float> cellStateOutValue
1724 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001725 -0.0531632f, -0.0118138f, 0.0870833f, 0.0347929f, -0.076144f,
1726 -0.0659219f, -0.0463811f, 0.0141307f, -0.0127706f, -0.03782f,
1727 -0.00402401f, -0.00571876f, -0.187957f, -0.0247127f, 0.0711425f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001728 0.008244f, 0.0492649f, 0.126972f, 0.0933097f, 0.29848f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001729 -0.0966178f, -0.114417f, 0.0387229f, 0.0453255f, -0.181286f,
1730 -0.0651251f, -0.0996879f, -0.00276995f, 0.0617558f, -0.0100728f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001731 0.056304f, -0.077416f, -0.162858f, -0.0541251f, 0.0571202f,
1732 -0.0525331f, 0.0724297f, 0.171029f, 0.141738f, 0.295483f
1733 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001734 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1735 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +00001736 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1737 std::vector<float> outputValue
1738 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001739 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001740 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001741 -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001742 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f
1743 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001744
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001745 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1746 inputToInputWeightsDimensions, inputToInputWeightsValue,
1747 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1748 inputToCellWeightsDimensions, inputToCellWeightsValue,
1749 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1750 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1751 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1752 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1753 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1754 cellToInputWeightsDimensions, cellToInputWeightsValue,
1755 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1756 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1757 inputGateBiasDimensions, inputGateBiasValue,
1758 forgetGateBiasDimensions, forgetGateBiasValue,
1759 cellBiasDimensions, cellBiasValue,
1760 outputGateBiasDimensions, outputGateBiasValue,
1761 projectionWeightsDimensions, projectionWeightsValue,
1762 projectionBiasDimensions, projectionBiasValue,
1763 outputStateInDimensions, outputStateInValue,
1764 cellStateInDimensions, cellStateInValue,
1765 activationFunctionDimensions, activationFunctionValue,
1766 cellClippingThresholdDimensions, cellClippingThresholdValue,
1767 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1768 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1769 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1770 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1771 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1772 scratchBufferDimensions, scratchBufferValue,
1773 outputStateOutDimensions, outputStateOutValue,
1774 cellStateOutDimensions, cellStateOutValue,
1775 outputDimensions, outputValue,
1776 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +01001777}
1778
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001779template <typename HalPolicy>
Matteo Martincighc7434122018-11-14 12:27:04 +00001780void LstmCifgPeepholeNoProjectionBatch2(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +01001781{
1782 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
1783 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
1784 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
1785 // The batch size has been increased to 2 (it was 1 in the VTS test) with appropriate input and output values added.
1786
1787 uint32_t batchSize = 2;
1788 uint32_t inputSize = 2;
1789 uint32_t numUnits = 4;
1790 uint32_t outputSize = numUnits;
1791
1792 // Inputs:
1793 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1794 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +00001795 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1796 std::vector<float> inputValue{2.0f, 3.0f, 3.0f, 4.0f};
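    // Laid out row-major as [batchSize, inputSize] = [2, 2]: batch 0 is {2.0f, 3.0f} and
    // batch 1 is {3.0f, 4.0f}.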
telsoa01ce3e84a2018-08-31 09:31:35 +01001797
1798 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1799 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +00001800 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
1801 std::vector<float> inputToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001802 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1803 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001804 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
1805 std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
1806 0.13056988f, -0.36333650f,
1807 -0.22755712f, 0.28253698f,
1808 0.24407166f, 0.33826375f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001809 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001810 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
1811 std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
1812 -0.09624726f, 0.05100781f,
1813 0.04717243f, 0.48944736f,
1814 -0.38535351f, -0.17212132f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001815 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1816 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001817 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
1818 std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
1819 -0.55932593f, -0.09426838f,
1820 -0.44257352f, 0.54939759f,
1821 0.01533556f, 0.42751634f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001822 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1823 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1824 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +00001825 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS declares {4, 4}; with CIFG these weights have no values, so an empty {0} operand is used
1826 std::vector<float> recurrentToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001827 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1828 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001829 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
1830 std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
1831 -0.14340827f, 0.36986142f, 0.23414481f, 0.55899000f,
1832 0.10798943f, -0.41174671f, 0.17751795f, -0.34484994f,
1833 -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001834 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1835 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001836 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
1837 std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
1838 0.42957711f, 0.01841056f, -0.32764608f, -0.33027974f,
1839 -0.10826075f, 0.20675004f, 0.19069612f, -0.03026325f,
1840 -0.54532051f, 0.33003211f, 0.44901288f, 0.21193194f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001841 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1842 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001843 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1844 std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
1845 0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
1846 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
1847 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001848 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001849 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
1850 std::vector<float> cellToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001851 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001852 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1853 std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001854 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001855 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1856 std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001857 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001858 hidl_vec<uint32_t> inputGateBiasDimensions{0}; // VTS declares {4}; with CIFG the input gate bias has no values, so an empty {0} operand is used
1859 std::vector<float> inputGateBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001860 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001861 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1862 std::vector<float> forgetGateBiasValue{1.0f, 1.0f, 1.0f, 1.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001863 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001864 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1865 std::vector<float> cellBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001866 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001867 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1868 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001869 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1870 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001871 hidl_vec<uint32_t> projectionWeightsDimensions{0};
1872 std::vector<float> projectionWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001873 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001874 hidl_vec<uint32_t> projectionBiasDimensions{0};
1875 std::vector<float> projectionBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001876
1877 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001878 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1879 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001880 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001881 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1882 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001883
Matteo Martincighc7434122018-11-14 12:27:04 +00001884 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +01001885 // 20: The activation function: A value indicating the activation function:
1886 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +00001887 hidl_vec<uint32_t> activationFunctionDimensions{};
1888 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +01001889 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1890 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001891 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1892 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001893 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1894 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001895 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1896 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001897
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001898 // Normalization:
1899 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
1900 // Used to rescale normalized inputs to activation at input gate.
1901 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
1902 std::vector<float> inputLayerNormWeightsValue;
1903 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1904 // Used to rescale normalized inputs to activation at forget gate.
1905 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
1906 std::vector<float> forgetLayerNormWeightsValue;
1907 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1908 // Used to rescale normalized inputs to activation at cell gate.
1909 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1910 std::vector<float> cellLayerNormWeightsValue;
1911 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1912 // Used to rescale normalized inputs to activation at output gate.
1913 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1914 std::vector<float> outputLayerNormWeightsValue;
1915
telsoa01ce3e84a2018-08-31 09:31:35 +01001916 // Outputs:
1917 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1918 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +00001919 // HOWEVER, judging by the code, it appears to be the opposite: (cifg ? 3 : 4) * numUnits
1920 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1921 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1922 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1923 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
1924 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
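    // Here CIFG is enabled (the input gate operands above are empty), so only three gate
    // buffers are needed per batch entry: batchSize * numUnits * 3 = 2 * 4 * 3 = 24 floats.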
telsoa01ce3e84a2018-08-31 09:31:35 +01001925 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001926 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1927 std::vector<float> outputStateOutValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1928 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001929 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001930 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1931 std::vector<float> cellStateOutValue{-0.76044439f, -0.01804161f, 0.18226376f, -0.06493707f,
1932 -0.90477051f, -0.04355603f, 0.18475688f, -0.04158677f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001933 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1934 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +00001935 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1936 std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1937 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001938
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001939 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1940 inputToInputWeightsDimensions, inputToInputWeightsValue,
1941 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1942 inputToCellWeightsDimensions, inputToCellWeightsValue,
1943 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1944 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1945 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1946 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1947 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1948 cellToInputWeightsDimensions, cellToInputWeightsValue,
1949 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1950 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1951 inputGateBiasDimensions, inputGateBiasValue,
1952 forgetGateBiasDimensions, forgetGateBiasValue,
1953 cellBiasDimensions, cellBiasValue,
1954 outputGateBiasDimensions, outputGateBiasValue,
1955 projectionWeightsDimensions, projectionWeightsValue,
1956 projectionBiasDimensions, projectionBiasValue,
1957 outputStateInDimensions, outputStateInValue,
1958 cellStateInDimensions, cellStateInValue,
1959 activationFunctionDimensions, activationFunctionValue,
1960 cellClippingThresholdDimensions, cellClippingThresholdValue,
1961 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1962 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1963 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1964 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1965 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1966 scratchBufferDimensions, scratchBufferValue,
1967 outputStateOutDimensions, outputStateOutValue,
1968 cellStateOutDimensions, cellStateOutValue,
1969 outputDimensions, outputValue,
1970 compute);
Matteo Martincighc7434122018-11-14 12:27:04 +00001971}
Matteo Martincighc7434122018-11-14 12:27:04 +00001972
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001973template <typename HalPolicy>
1974void LstmNoCifgPeepholeProjectionNoClippingLayerNorm(armnn::Compute compute)
Matteo Martincighc7434122018-11-14 12:27:04 +00001975{
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001976 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/layer_norm_lstm.model.cpp
1977 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
1978 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
1979
1980 uint32_t batchSize = 2;
1981 uint32_t inputSize = 5;
1982 uint32_t numUnits = 4;
1983 uint32_t outputSize = 3;
1984
1985 // Inputs:
1986 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1987 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1988 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1989 std::vector<float> inputValue{ 0.7f, 0.8f, 0.1f, 0.2f, 0.3f, // batch 0
1990 0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; // batch 1
1991
1992 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1993 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
1994 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
1995 std::vector<float> inputToInputWeightsValue{ 0.5, 0.6, 0.7, -0.8, -0.9,
1996 0.1, 0.2, 0.3, -0.4, 0.5,
1997 -0.8, 0.7, -0.6, 0.5, -0.4,
1998 -0.5, -0.4, -0.3, -0.2, -0.1};
1999 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2000 // [num_units, input_size].
2001 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
2002 std::vector<float> inputToForgetWeightsValue{-0.6, -0.1, 0.3, 0.2, 0.9,
2003 -0.5, -0.2, -0.4, 0.3, -0.8,
2004 -0.4, 0.3, -0.5, -0.4, -0.6,
2005 0.3, -0.4, -0.6, -0.5, -0.5};
2006 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
2007 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
2008 std::vector<float> inputToCellWeightsValue{-0.4, -0.3, -0.2, -0.1, -0.5,
2009 0.5, -0.2, -0.3, -0.2, -0.6,
2010 0.6, -0.1, -0.4, -0.3, -0.7,
2011 0.7, -0.9, -0.5, 0.8, 0.6};
2012 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2013 // [num_units, input_size].
2014 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
2015 std::vector<float> inputToOutputWeightsValue{-0.8, -0.4, -0.2, -0.9, -0.1,
2016 -0.7, 0.3, -0.3, -0.8, -0.2,
2017 0.6, -0.2, 0.4, -0.7, -0.3,
2018 -0.5, 0.1, 0.5, -0.6, -0.4};
2019 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2020 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
2021 // “num_units”), or the second dimension of the “projection_weights”, if defined.
2022 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
2023 std::vector<float> recurrentToInputWeightsValue{-0.2, -0.3, 0.4,
2024 0.1, -0.5, 0.9,
2025 -0.2, -0.3, -0.7,
2026 0.05, -0.2, -0.6};
2027 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2028 // [num_units, output_size].
2029 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
2030 std::vector<float> recurrentToForgetWeightsValue{-0.5, -0.3, -0.5,
2031 -0.2, 0.6, 0.4,
2032 0.9, 0.3, -0.1,
2033 0.2, 0.5, 0.2};
2034 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2035 // [num_units, output_size].
2036 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
2037 std::vector<float> recurrentToCellWeightsValue{-0.3, 0.2, 0.1,
2038 -0.3, 0.8,-0.08,
2039 -0.2, 0.3, 0.8,
2040 -0.6, -0.1, 0.2};
2041 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2042 // [num_units, output_size].
2043 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
2044 std::vector<float> recurrentToOutputWeightsValue{ 0.3, -0.1, 0.1,
2045 -0.2, -0.5, -0.7,
2046 -0.2, -0.6, -0.1,
2047 -0.4, -0.7, -0.2};
2048 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2049 hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
2050 std::vector<float> cellToInputWeightsValue{0.05, 0.1, 0.25, 0.15};
2051 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2052 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
2053 std::vector<float> cellToForgetWeightsValue{-0.02, -0.15, -0.25, -0.03};
2054 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2055 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
2056 std::vector<float> cellToOutputWeightsValue{0.1, -0.1, -0.5, 0.05};
2057 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2058 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
2059 std::vector<float> inputGateBiasValue{0.03, 0.15, 0.22, 0.38};
2060 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2061 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
2062 std::vector<float> forgetGateBiasValue{0.1, -0.3, -0.2, 0.1};
2063 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2064 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
2065 std::vector<float> cellBiasValue{-0.05, 0.72, 0.25, 0.08};
2066 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2067 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
2068 std::vector<float> outputGateBiasValue{0.05, -0.01, 0.2, 0.1};
2069 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2070 // [output_size, num_units].
2071 hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
2072 std::vector<float> projectionWeightsValue{-0.1, 0.2, 0.01,
2073 -0.2, 0.1, 0.5,
2074 0.3, 0.08, 0.07,
2075 0.2, -0.4, 0.2};
2076 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
2077 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
2078 std::vector<float> projectionBiasValue(outputSize, 0.0f);
2079 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2080 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
2081 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
2082 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2083 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
2084 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
2085
2086 // Constant scalar values (the VTS test adds these as tensors of dim {})
2087 // 20: The activation function: A value indicating the activation function:
2088 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
2089 hidl_vec<uint32_t> activationFunctionDimensions{};
2090 std::vector<int32_t> activationFunctionValue{4};
2091 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
2092 // If set to 0.0 then clipping is disabled.
2093 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
2094 std::vector<float> cellClippingThresholdValue{0.0f};
2095 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
2096 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2097 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
2098 std::vector<float> projectionClippingThresholdValue{0.0f};
2099
2100 // Normalization:
2101 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
2102 // Used to rescale normalized inputs to activation at input gate.
2103 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
2104 std::vector<float> inputLayerNormWeightsValue{0.1, 0.2, 0.3, 0.5};
2105 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
2106 // Used to rescale normalized inputs to activation at forget gate.
2107 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
2108 std::vector<float> forgetLayerNormWeightsValue{0.2, 0.2, 0.4, 0.3};
2109 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
2110 // Used to rescale normalized inputs to activation at cell gate.
2111 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
2112 std::vector<float> cellLayerNormWeightsValue{0.7, 0.2, 0.3, 0.8};
2113 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
2114 // Used to rescale normalized inputs to activation at output gate.
2115 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
2116 std::vector<float> outputLayerNormWeightsValue{0.6, 0.2, 0.2, 0.5};
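    // Layer normalization sketch (the usual layer-norm LSTM formulation, included for reference
    // rather than taken from this file): for each gate, the pre-activation vector x of length
    // num_units is normalized and then rescaled by that gate's weights w before the bias and
    // activation are applied:
    //     x[i] = (x[i] - mean(x)) / sqrt(variance(x) + epsilon) * w[i]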
2117
2118 // Outputs:
2119 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
2120 // CIFG, or [batch_size, num_units * 3] without CIFG.
2121 // HOWEVER, judging by the code, it appears to be the opposite: (cifg ? 3 : 4) * numUnits
2122 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
2123 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
2124 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
2125 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
2126 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
2127 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2128 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
2129 std::vector<float> outputStateOutValue { 0.02440767f, 0.12802738f, -0.00170918f,
2130 -0.00692428f, 0.08487406f, 0.06344498f};
2131 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2132 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
2133 std::vector<float> cellStateOutValue {-0.45177122f, 0.37691566f, 0.22542511f, 0.23240635f,
2134 -0.25258583f, 0.33042118f, 0.01730525f, 0.36660123f};
2135 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
2136 // effectively the same as the current “output state (out)” value.
2137 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
2138 std::vector<float> outputValue{ 0.02440767f, 0.12802738f, -0.00170918f,
2139 -0.00692428f, 0.08487406f, 0.06344498f};
2140
2141 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
2142 inputToInputWeightsDimensions, inputToInputWeightsValue,
2143 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
2144 inputToCellWeightsDimensions, inputToCellWeightsValue,
2145 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
2146 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
2147 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
2148 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
2149 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
2150 cellToInputWeightsDimensions, cellToInputWeightsValue,
2151 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
2152 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
2153 inputGateBiasDimensions, inputGateBiasValue,
2154 forgetGateBiasDimensions, forgetGateBiasValue,
2155 cellBiasDimensions, cellBiasValue,
2156 outputGateBiasDimensions, outputGateBiasValue,
2157 projectionWeightsDimensions, projectionWeightsValue,
2158 projectionBiasDimensions, projectionBiasValue,
2159 outputStateInDimensions, outputStateInValue,
2160 cellStateInDimensions, cellStateInValue,
2161 activationFunctionDimensions, activationFunctionValue,
2162 cellClippingThresholdDimensions, cellClippingThresholdValue,
2163 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
2164 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
2165 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
2166 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
2167 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
2168 scratchBufferDimensions, scratchBufferValue,
2169 outputStateOutDimensions, outputStateOutValue,
2170 cellStateOutDimensions, cellStateOutValue,
2171 outputDimensions, outputValue,
2172 compute);
Matteo Martincighc7434122018-11-14 12:27:04 +00002173}
2174
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002175template <typename HalPolicy>
2176void LstmCifgPeepholeProjectionNoClippingLayerNorm(armnn::Compute compute)
Matteo Martincighc7434122018-11-14 12:27:04 +00002177{
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002178 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/layer_norm_lstm.model.cpp
2179 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
2180 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
Matteo Martincighc7434122018-11-14 12:27:04 +00002181
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002182 uint32_t batchSize = 2;
2183 uint32_t inputSize = 5;
2184 uint32_t numUnits = 4;
2185 uint32_t outputSize = 3;
Matteo Martincighc7434122018-11-14 12:27:04 +00002186
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002187 // Inputs:
2188 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
2189 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
2190 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
2191 std::vector<float> inputValue{ 0.7f, 0.8f, 0.1f, 0.2f, 0.3f, // batch 0
2192 0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; // batch 1
telsoa01ce3e84a2018-08-31 09:31:35 +01002193
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002194 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2195 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
2196 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
2197 std::vector<float> inputToInputWeightsValue;
2198 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2199 // [num_units, input_size].
2200 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
2201 std::vector<float> inputToForgetWeightsValue{-0.6, -0.1, 0.3, 0.2, 0.9,
2202 -0.5, -0.2, -0.4, 0.3, -0.8,
2203 -0.4, 0.3, -0.5, -0.4, -0.6,
2204 0.3, -0.4, -0.6, -0.5, -0.5};
2205 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
2206 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
2207 std::vector<float> inputToCellWeightsValue{-0.4, -0.3, -0.2, -0.1, -0.5,
2208 0.5, -0.2, -0.3, -0.2, -0.6,
2209 0.6, -0.1, -0.4, -0.3, -0.7,
2210 0.7, -0.9, -0.5, 0.8, 0.6};
2211 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2212 // [num_units, input_size].
2213 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
2214 std::vector<float> inputToOutputWeightsValue{-0.8, -0.4, -0.2, -0.9, -0.1,
2215 -0.7, 0.3, -0.3, -0.8, -0.2,
2216 0.6, -0.2, 0.4, -0.7, -0.3,
2217 -0.5, 0.1, 0.5, -0.6, -0.4};
2218 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2219 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
2220 // “num_units”), or the second dimension of the “projection_weights”, if defined.
2221 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0};
2222 std::vector<float> recurrentToInputWeightsValue;
2223 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2224 // [num_units, output_size].
2225 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
2226 std::vector<float> recurrentToForgetWeightsValue{-0.5, -0.3, -0.5,
2227 -0.2, 0.6, 0.4,
2228 0.9, 0.3, -0.1,
2229 0.2, 0.5, 0.2};
2230 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2231 // [num_units, output_size].
2232 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
2233 std::vector<float> recurrentToCellWeightsValue{-0.3, 0.2, 0.1,
2234 -0.3, 0.8,-0.08,
2235 -0.2, 0.3, 0.8,
2236 -0.6, -0.1, 0.2};
2237 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2238 // [num_units, output_size].
2239 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
2240 std::vector<float> recurrentToOutputWeightsValue{ 0.3, -0.1, 0.1,
2241 -0.2, -0.5, -0.7,
2242 -0.2, -0.6, -0.1,
2243 -0.4, -0.7, -0.2};
2244 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2245 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
2246 std::vector<float> cellToInputWeightsValue;
2247 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2248 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
2249 std::vector<float> cellToForgetWeightsValue{-0.02, -0.15, -0.25, -0.03};
2250 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2251 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
2252 std::vector<float> cellToOutputWeightsValue{0.1, -0.1, -0.5, 0.05};
2253 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2254 hidl_vec<uint32_t> inputGateBiasDimensions{0};
2255 std::vector<float> inputGateBiasValue;
2256 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2257 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
2258 std::vector<float> forgetGateBiasValue{0.1, -0.3, -0.2, 0.1};
2259 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2260 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
2261 std::vector<float> cellBiasValue{-0.05, 0.72, 0.25, 0.08};
2262 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2263 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
2264 std::vector<float> outputGateBiasValue{0.05, -0.01, 0.2, 0.1};
2265 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2266 // [output_size, num_units].
2267 hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
2268 std::vector<float> projectionWeightsValue{-0.1, 0.2, 0.01,
2269 -0.2, 0.1, 0.5,
2270 0.3, 0.08, 0.07,
2271 0.2, -0.4, 0.2};
2272 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
2273 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
2274 std::vector<float> projectionBiasValue(outputSize, 0.0f);
2275 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2276 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
2277 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
2278 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2279 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
2280 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
2281
2282 // Constant scalar values (the VTS test adds these as tensors of dim {})
2283 // 20: The activation function: A value indicating the activation function:
2284 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
2285 hidl_vec<uint32_t> activationFunctionDimensions{};
2286 std::vector<int32_t> activationFunctionValue{4};
2287 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
2288 // If set to 0.0 then clipping is disabled.
2289 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
2290 std::vector<float> cellClippingThresholdValue{0.0f};
2291 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
2292 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2293 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
2294 std::vector<float> projectionClippingThresholdValue{0.0f};
2295
2296 // Normalization:
2297 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
2298 // Used to rescale normalized inputs to activation at input gate.
2299 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
2300 std::vector<float> inputLayerNormWeightsValue{0.1, 0.2, 0.3, 0.5};
2301 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
2302 // Used to rescale normalized inputs to activation at forget gate.
2303 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
2304 std::vector<float> forgetLayerNormWeightsValue{0.2, 0.2, 0.4, 0.3};
2305 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
2306 // Used to rescale normalized inputs to activation at cell gate.
2307 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
2308 std::vector<float> cellLayerNormWeightsValue{0.7, 0.2, 0.3, 0.8};
2309 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
2310 // Used to rescale normalized inputs to activation at output gate.
2311 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
2312 std::vector<float> outputLayerNormWeightsValue{0.6, 0.2, 0.2, 0.5};
2313
2314 // Outputs:
2315 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
2316 // CIFG, or [batch_size, num_units * 3] without CIFG.
2317 // HOWEVER, judging by the code, it appears to be the opposite: (cifg ? 3 : 4) * numUnits
2318 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
2319 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
2320 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
2321 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
2322 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
2323 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2324 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
2325 std::vector<float> outputStateOutValue { 0.02129706f, 0.14081624f, 0.01127331f,
2326 -0.02263505f, 0.09169482f, 0.07691758f};
2327 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2328 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
2329 std::vector<float> cellStateOutValue{-0.35102980f, 0.42610350f, 0.21463650f, 0.27716520f,
2330 -0.18855170f, 0.32522000f, 0.02036650f, 0.48967660f};
2331 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
2332 // effectively the same as the current “output state (out)” value.
2333 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
2334 std::vector<float> outputValue{ 0.02129706f, 0.14081624f, 0.01127331f,
2335 -0.02263505f, 0.09169482f, 0.07691758f};
2336
2337 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
2338 inputToInputWeightsDimensions, inputToInputWeightsValue,
2339 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
2340 inputToCellWeightsDimensions, inputToCellWeightsValue,
2341 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
2342 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
2343 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
2344 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
2345 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
2346 cellToInputWeightsDimensions, cellToInputWeightsValue,
2347 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
2348 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
2349 inputGateBiasDimensions, inputGateBiasValue,
2350 forgetGateBiasDimensions, forgetGateBiasValue,
2351 cellBiasDimensions, cellBiasValue,
2352 outputGateBiasDimensions, outputGateBiasValue,
2353 projectionWeightsDimensions, projectionWeightsValue,
2354 projectionBiasDimensions, projectionBiasValue,
2355 outputStateInDimensions, outputStateInValue,
2356 cellStateInDimensions, cellStateInValue,
2357 activationFunctionDimensions, activationFunctionValue,
2358 cellClippingThresholdDimensions, cellClippingThresholdValue,
2359 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
2360 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
2361 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
2362 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
2363 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
2364 scratchBufferDimensions, scratchBufferValue,
2365 outputStateOutDimensions, outputStateOutValue,
2366 cellStateOutDimensions, cellStateOutValue,
2367 outputDimensions, outputValue,
2368 compute);
2369}
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01002370
2371template <typename HalPolicy>
2372void QuantizedLstm(armnn::Compute compute)
2373{
2374 boost::ignore_unused(compute);
2375 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/quantized_lstm.model.cpp
2376 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/quantized_lstm.example.cpp
2377 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
2378
2379 uint32_t batchSize = 2;
2380 uint32_t inputSize = 2;
2381 uint32_t outputSize = 4;
2382
2383 // Inputs:
2384 // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
2385 // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
2386 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
2387 std::vector<uint8_t> inputValue{166, 179, 50, 150};
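    // Dequantization sketch (derived from the fixed range [-1, 127/128] stated above, i.e.
    // scale = 1/128 and zeroPoint = 128, not from values defined in this test):
    //     real = (static_cast<int32_t>(q) - 128) / 128.0f;
    // so the first element, 166, corresponds to (166 - 128) / 128 = 0.296875f.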
2388
2389 // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2390 // [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
2391 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
2392 hidl_vec<uint32_t> inputToInputWeightsDimensions{outputSize, inputSize};
2393 std::vector<uint8_t> inputToInputWeightsValue{146, 250, 235, 171, 10, 218, 171, 108};
2394 // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2395 // [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
2396 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
2397 hidl_vec<uint32_t> inputToForgetWeightsDimensions{outputSize, inputSize};
2398 std::vector<uint8_t> inputToForgetWeightsValue{24, 50, 132, 179, 158, 110, 3, 169};
2399 // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2400 // [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
2401 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
2402 hidl_vec<uint32_t> inputToCellWeightsDimensions{outputSize, inputSize};
2403 std::vector<uint8_t> inputToCellWeightsValue{133, 34, 29, 49, 206, 109, 54, 183};
2404 // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2405 // [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
2406 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
2407 hidl_vec<uint32_t> inputToOutputWeightsDimensions{outputSize, inputSize};
2408 std::vector<uint8_t> inputToOutputWeightsValue{195, 187, 11, 99, 109, 10, 218, 48};
2409 // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2410 // [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
2411 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
2412 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{outputSize, outputSize};
2413 std::vector<uint8_t> recurrentToInputWeightsValue{254, 206, 77, 168, 71, 20, 215, 6,
2414 223, 7, 118, 225, 59, 130, 174, 26};
2415 // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2416 // [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
2417 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
2418 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{outputSize, outputSize};
2419 std::vector<uint8_t> recurrentToForgetWeightsValue{137, 240, 103, 52, 68, 51, 237, 112,
2420 0, 220, 89, 23, 69, 4, 207, 253};
2421 // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2422 // [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
2423 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
2424 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{outputSize, outputSize};
2425 std::vector<uint8_t> recurrentToCellWeightsValue{172, 60, 205, 65, 14, 0, 140, 168,
2426 240, 223, 133, 56, 142, 64, 246, 216};
2427 // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2428 // [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
2429 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
2430 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{outputSize, outputSize};
2431 std::vector<uint8_t> recurrentToOutputWeightsValue{106, 214, 67, 23, 59, 158, 45, 3,
2432 119, 132, 49, 205, 129, 218, 11, 98};
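    // For orientation: the weights above and the biases below feed the standard
    // (non-CIFG, non-peephole) LSTM gate computations, roughly (informal notation):
    //     i_t = sigmoid(W_xi * x_t + W_hi * h_{t-1} + b_i)
    //     f_t = sigmoid(W_xf * x_t + W_hf * h_{t-1} + b_f)
    //     g_t = tanh   (W_xg * x_t + W_hg * h_{t-1} + b_g)
    //     o_t = sigmoid(W_xo * x_t + W_ho * h_{t-1} + b_o)
    //     c_t = f_t .* c_{t-1} + i_t .* g_t,    h_t = o_t .* tanh(c_t)
    // with the intermediates computed in higher precision and re-quantized to the fixed
    // output ranges described below.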
2433 // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
2434 // bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
2435 // of input and weights scales and zeroPoint equal to 0.
2436 hidl_vec<uint32_t> inputGateBiasDimensions{outputSize};
2437 std::vector<int32_t> inputGateBiasValue{-7876, 13488, -726, 32839};
2438 // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
2439 // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
2440 // of input and weights scales and zeroPoint equal to 0.
2441 hidl_vec<uint32_t> forgetGateBiasDimensions{outputSize};
2442 std::vector<int32_t> forgetGateBiasValue{9206, -46884, -11693, -38724};
2443    // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
2444 // for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
2445 // and weights scales and zeroPoint equal to 0.
2446 hidl_vec<uint32_t> cellBiasDimensions{outputSize};
2447 std::vector<int32_t> cellBiasValue{39481, 48624, 48976, -21419};
2448    // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
2449 // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
2450 // of input and weights scales and zeroPoint equal to 0.
2451 hidl_vec<uint32_t> outputGateBiasDimensions{outputSize};
2452 std::vector<int32_t> outputGateBiasValue{-58999, -17050, -41852, -40538};
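    // As the comments above note, each INT32 bias uses scale = inputScale * weightsScale and
    // zeroPoint = 0, so a raw bias value b represents b * inputScale * weightsScale in real
    // terms (inputScale is 1/128 here; the weights scale is presumably set where the weight
    // operands are constructed inside QuantizedLstmTestImpl).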
2453
2454    // 13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
2455 // [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
2456 // It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
2457 hidl_vec<uint32_t> previousCellStateInDimensions{batchSize, outputSize};
2458 std::vector<int16_t> previousCellStateInValue{876, 1034, 955, -909, 761, 1029, 796, -1036};
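    // For reference: the QUANT16_SYMM range of -2^4, 2^4 * 32767/32768 corresponds to
    // scale = 16 / 32768 = 1/2048 (about 0.000488) with zeroPoint = 0, so e.g. the first
    // cell state value above, 876, represents 876 / 2048 ~= 0.4277.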
2459 // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
2460    //     [numBatches, outputSize] specifying the output of the LSTM cell from the previous time step. Tensor
2461 // is quantized with a fixed quantization range of -1, 127/128.
2462 hidl_vec<uint32_t> previousOutputInDimensions{batchSize, outputSize};
2463 std::vector<uint8_t> previousOutputInValue{136, 150, 140, 115, 135, 152, 138, 112};
2464
2465 // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
2466 // which contains a cell state from the current time step. Tensor is quantized using a quantization range
2467 // of -2^4, 2^4 * 32767/32768.
2468 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, outputSize};
2469    std::vector<int16_t> cellStateOutValue{1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235};
2470    // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
2471 // contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
2472 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
2473    std::vector<uint8_t> outputValue{140, 151, 146, 112, 136, 156, 142, 112};
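    // The expected outputs use the same fixed quantization as the corresponding inputs:
    // e.g. the first output value 140 dequantizes to (140 - 128) / 128 = 0.09375, and the
    // first cell state value 1485 to 1485 / 2048 ~= 0.7251.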
2474
2475
2476 QuantizedLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
2477 inputToInputWeightsDimensions, inputToInputWeightsValue,
2478 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
2479 inputToCellWeightsDimensions, inputToCellWeightsValue,
2480 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
2481 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
2482 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
2483 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
2484 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
2485 inputGateBiasDimensions, inputGateBiasValue,
2486 forgetGateBiasDimensions, forgetGateBiasValue,
2487 cellBiasDimensions, cellBiasValue,
2488 outputGateBiasDimensions, outputGateBiasValue,
2489 previousOutputInDimensions, previousOutputInValue,
2490 previousCellStateInDimensions, previousCellStateInValue,
2491 cellStateOutDimensions, cellStateOutValue,
2492 outputDimensions, outputValue);
2493}
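
// A minimal usage sketch (illustrative only; the actual BOOST_AUTO_TEST_CASEs live in the
// per-backend test files and choose the HalPolicy and armnn::Compute backend there):
//
//     BOOST_AUTO_TEST_CASE(QuantizedLstmTest)
//     {
//         QuantizedLstm<armnn_driver::hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
//     }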