//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DriverTestHelpers.hpp"

#include <armnn/utility/IgnoreUnused.hpp>

#include <boost/array.hpp>
#include <boost/math/special_functions/relative_difference.hpp>

using ArmnnDriver   = armnn_driver::ArmnnDriver;
using DriverOptions = armnn_driver::DriverOptions;

using namespace driverTestHelpers;
using namespace android::hardware;

namespace
{

template<typename T>
RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
{
    DataLocation inputInloc = {};
    inputInloc.poolIndex = poolIndex;
    inputInloc.offset = 0;
    inputInloc.length = value.size() * sizeof(T);
    RequestArgument inputRequestArgument = {};
    inputRequestArgument.location = inputInloc;
    inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
    return inputRequestArgument;
}

// Returns true if the relative difference between two float values is less than the tolerance value given.
// This is used because the floating-point comparison tolerance set on each BOOST_AUTO_TEST_CASE does not work
// for these tests.
bool TolerantCompareEqual(float a, float b, float tolerance = 0.00001f)
{
    float rd;
    if (a == 0.0f)
    {
        rd = fabs(b);
    }
    else if (b == 0.0f)
    {
        rd = fabs(a);
    }
    else
    {
        rd = boost::math::relative_difference(a, b);
    }
    return rd < tolerance;
}
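
// For example (illustrative values only):
//     TolerantCompareEqual(1.0f, 1.0000005f); // true:  relative difference well below the default 1e-5
//     TolerantCompareEqual(1.0f, 1.1f);       // false: relative difference of roughly 0.1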

// Helper function to create an OperandLifeTime::NO_VALUE for testing.
// To be used on optional input operands that have no values - these are valid and should be tested.
V1_0::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
{
    // Only create a NO_VALUE for optional operands that have no elements
    if (dimensions.size() == 0 || dimensions[0] == 0)
    {
        return V1_0::OperandLifeTime::NO_VALUE;
    }
    return V1_0::OperandLifeTime::CONSTANT_COPY;
}
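
// For example (illustrative values only):
//     CreateNoValueLifeTime({});     // NO_VALUE      - no dimensions given
//     CreateNoValueLifeTime({0});    // NO_VALUE      - first dimension is 0, i.e. no elements
//     CreateNoValueLifeTime({4, 4}); // CONSTANT_COPY - the operand has data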

template<typename HalModel>
void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, const V1_0::Request& request)
{
    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, driver);
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }
}

#ifdef ARMNN_ANDROID_NN_V1_2

template<>
void ExecuteModel<armnn_driver::hal_1_2::HalPolicy::Model>(const armnn_driver::hal_1_2::HalPolicy::Model& model,
                                                           armnn_driver::ArmnnDriver& driver,
                                                           const V1_0::Request& request)
{
    android::sp<V1_2::IPreparedModel> preparedModel = PrepareModel_1_2(model, driver);
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }
}

#endif
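
// Note: both ExecuteModel overloads above only run the request if model preparation succeeds;
// if the driver rejects the model, no execution is attempted.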

} // anonymous namespace

#ifndef ARMCOMPUTECL_ENABLED
static const boost::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
#else
static const boost::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
#endif
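
// GpuAcc only appears in COMPUTE_DEVICES when the driver is built with the Arm Compute CL backend
// (ARMCOMPUTECL_ENABLED); otherwise only the CpuRef reference backend is exercised.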

// Add our own tests here, since the LSTM tests supplied by Google fail because of non-const weights.
template <typename HalPolicy>
void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                  const std::vector<float>& inputValue,
                  const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                  const std::vector<float>& inputToInputWeightsValue,
                  const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                  const std::vector<float>& inputToForgetWeightsValue,
                  const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                  const std::vector<float>& inputToCellWeightsValue,
                  const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                  const std::vector<float>& inputToOutputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                  const std::vector<float>& recurrentToInputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                  const std::vector<float>& recurrentToForgetWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                  const std::vector<float>& recurrentToCellWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                  const std::vector<float>& recurrentToOutputWeightsValue,
                  const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
                  const std::vector<float>& cellToInputWeightsValue,
                  const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
                  const std::vector<float>& cellToForgetWeightsValue,
                  const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
                  const std::vector<float>& cellToOutputWeightsValue,
                  const hidl_vec<uint32_t>& inputGateBiasDimensions,
                  const std::vector<float>& inputGateBiasValue,
                  const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                  const std::vector<float>& forgetGateBiasValue,
                  const hidl_vec<uint32_t>& cellBiasDimensions,
                  const std::vector<float>& cellBiasValue,
                  const hidl_vec<uint32_t>& outputGateBiasDimensions,
                  const std::vector<float>& outputGateBiasValue,
                  const hidl_vec<uint32_t>& projectionWeightsDimensions,
                  const std::vector<float>& projectionWeightsValue,
                  const hidl_vec<uint32_t>& projectionBiasDimensions,
                  const std::vector<float>& projectionBiasValue,
                  const hidl_vec<uint32_t>& outputStateInDimensions,
                  const std::vector<float>& outputStateInValue,
                  const hidl_vec<uint32_t>& cellStateInDimensions,
                  const std::vector<float>& cellStateInValue,
                  const hidl_vec<uint32_t>& activationFunctionDimensions,
                  const std::vector<int32_t>& activationFunctionValue,
                  const hidl_vec<uint32_t>& cellClippingThresholdDimensions,
                  const std::vector<float>& cellClippingThresholdValue,
                  const hidl_vec<uint32_t>& projectionClippingThresholdDimensions,
                  const std::vector<float>& projectionClippingThresholdValue,
                  const hidl_vec<uint32_t>& inputLayerNormWeightsDimensions,
                  const std::vector<float>& inputLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& forgetLayerNormWeightsDimensions,
                  const std::vector<float>& forgetLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& cellLayerNormWeightsDimensions,
                  const std::vector<float>& cellLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& outputLayerNormWeightsDimensions,
                  const std::vector<float>& outputLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& scratchBufferDimensions,
                  const std::vector<float>& scratchBufferValue,
                  const hidl_vec<uint32_t>& outputStateOutDimensions,
                  const std::vector<float>& outputStateOutValue,
                  const hidl_vec<uint32_t>& cellStateOutDimensions,
                  const std::vector<float>& cellStateOutValue,
                  const hidl_vec<uint32_t>& outputDimensions,
                  const std::vector<float>& outputValue,
                  armnn::Compute compute)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
    using Model = typename HalPolicy::Model;
    Model model = {};

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    AddInputOperand<HalPolicy>(model, inputDimensions);

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    AddTensorOperand<HalPolicy>(model,
                                inputToInputWeightsDimensions,
                                inputToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputToInputWeightsDimensions));
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToInputWeightsDimensions,
                                recurrentToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToInputWeightsDimensions,
                                cellToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToInputWeightsDimensions));
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToForgetWeightsDimensions,
                                cellToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToForgetWeightsDimensions));
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToOutputWeightsDimensions,
                                cellToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToOutputWeightsDimensions));
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                inputGateBiasDimensions,
                                inputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputGateBiasDimensions));
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, forgetGateBiasDimensions, forgetGateBiasValue);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, cellBiasDimensions, cellBiasValue);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, outputGateBiasDimensions, outputGateBiasValue);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    AddTensorOperand<HalPolicy>(model,
                                projectionWeightsDimensions,
                                projectionWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionWeightsDimensions));
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    AddTensorOperand<HalPolicy>(model,
                                projectionBiasDimensions,
                                projectionBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionBiasDimensions));

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddInputOperand<HalPolicy>(model, outputStateInDimensions);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddInputOperand<HalPolicy>(model, cellStateInDimensions);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    AddTensorOperand<HalPolicy>(model,
                                activationFunctionDimensions,
                                activationFunctionValue,
                                HalPolicy::OperandType::INT32);
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                cellClippingThresholdDimensions,
                                cellClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                projectionClippingThresholdDimensions,
                                projectionClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);

    bool normalizationEnabled = false;

    // If any of the layer normalization tensors has a value, all of the normalization tensors are added to the model.
    if (!inputLayerNormWeightsValue.empty() ||
        !forgetLayerNormWeightsValue.empty() ||
        !cellLayerNormWeightsValue.empty() ||
        !outputLayerNormWeightsValue.empty())
    {
        // Normalization:
        // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
        //     Used to rescale normalized inputs to activation at input gate.
        AddTensorOperand<HalPolicy>(model,
                                    inputLayerNormWeightsDimensions,
                                    inputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(inputLayerNormWeightsDimensions));
        // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
        //     Used to rescale normalized inputs to activation at forget gate.
        AddTensorOperand<HalPolicy>(model,
                                    forgetLayerNormWeightsDimensions,
                                    forgetLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(forgetLayerNormWeightsDimensions));
        // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
        //     Used to rescale normalized inputs to activation at cell gate.
        AddTensorOperand<HalPolicy>(model,
                                    cellLayerNormWeightsDimensions,
                                    cellLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(cellLayerNormWeightsDimensions));
        // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
        //     Used to rescale normalized inputs to activation at output gate.
        AddTensorOperand<HalPolicy>(model,
                                    outputLayerNormWeightsDimensions,
                                    outputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(outputLayerNormWeightsDimensions));

        normalizationEnabled = true;
    }

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    AddOutputOperand<HalPolicy>(model, scratchBufferDimensions);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddOutputOperand<HalPolicy>(model, outputStateOutDimensions);
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddOutputOperand<HalPolicy>(model, cellStateOutDimensions);
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    AddOutputOperand<HalPolicy>(model, outputDimensions);

    // make the lstm operation
    model.operations.resize(1);
    model.operations[0].type = HalPolicy::OperationType::LSTM;

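    // With layer normalization enabled the operation also consumes the four normalization weight
    // operands (indices 23-26), so its outputs start at index 27; without it the outputs follow
    // the clipping thresholds directly, starting at index 23.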
    if (normalizationEnabled)
    {
        model.operations[0].inputs = hidl_vec<uint32_t> { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,
                                                         14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26};
        model.operations[0].outputs = hidl_vec<uint32_t> {27, 28, 29, 30};
    }
    else
    {
        model.operations[0].inputs = hidl_vec<uint32_t> { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,
                                                         12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
        model.operations[0].outputs = hidl_vec<uint32_t> {23, 24, 25, 26};
    }

    // define the input values
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    inputArguments[0] = CreateRequestArgument<float>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<float>(outputStateInValue, 1);
    inputArguments[2] = CreateRequestArgument<float>(cellStateInValue, 2);

    // define the expected output values
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(4);

    outputArguments[0] = CreateRequestArgument<float>(scratchBufferValue, 3);
    outputArguments[1] = CreateRequestArgument<float>(outputStateOutValue, 4);
    outputArguments[2] = CreateRequestArgument<float>(cellStateOutValue, 5);
    outputArguments[3] = CreateRequestArgument<float>(outputValue, 6);

    V1_0::Request request = {};
    request.inputs  = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(outputStateInValue.size(), request, outputStateInValue.data());
    AddPoolAndSetData(cellStateInValue.size(), request, cellStateInValue.data());

    // add memory for the outputs
    AddPoolAndGetData<float>(scratchBufferValue.size(), request);
    android::sp<IMemory> outputStateOutMemory = AddPoolAndGetData<float>(outputStateOutValue.size(), request);
    float* outputStateOutData = static_cast<float*>(static_cast<void*>(outputStateOutMemory->getPointer()));
    android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData<float>(cellStateOutValue.size(), request);
    float* cellStateOutData = static_cast<float*>(static_cast<void*>(cellStateOutMemory->getPointer()));
    android::sp<IMemory> outputMemory = AddPoolAndGetData<float>(outputValue.size(), request);
    float* outputData = static_cast<float*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    ExecuteModel(model, *driver, request);

    // check the results
    for (size_t i = 0; i < outputStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
                   "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
    }
    for (size_t i = 0; i < cellStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]),
                   "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
    }
    for (size_t i = 0; i < outputValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
    }
}

template <typename HalPolicy>
void QuantizedLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                           const std::vector<uint8_t>& inputValue,
                           const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                           const std::vector<uint8_t>& inputToInputWeightsValue,
                           const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                           const std::vector<uint8_t>& inputToForgetWeightsValue,
                           const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                           const std::vector<uint8_t>& inputToCellWeightsValue,
                           const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                           const std::vector<uint8_t>& inputToOutputWeightsValue,
                           const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                           const std::vector<uint8_t>& recurrentToInputWeightsValue,
                           const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                           const std::vector<uint8_t>& recurrentToForgetWeightsValue,
                           const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                           const std::vector<uint8_t>& recurrentToCellWeightsValue,
                           const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                           const std::vector<uint8_t>& recurrentToOutputWeightsValue,
                           const hidl_vec<uint32_t>& inputGateBiasDimensions,
                           const std::vector<int32_t>& inputGateBiasValue,
                           const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                           const std::vector<int32_t>& forgetGateBiasValue,
                           const hidl_vec<uint32_t>& cellBiasDimensions,
                           const std::vector<int32_t>& cellBiasValue,
                           const hidl_vec<uint32_t>& outputGateBiasDimensions,
                           const std::vector<int32_t>& outputGateBiasValue,
                           const hidl_vec<uint32_t>& previousOutputInDimensions,
                           const std::vector<uint8_t>& previousOutputInValue,
                           const hidl_vec<uint32_t>& previousCellStateInDimensions,
                           const std::vector<int16_t>& previousCellStateInValue,
                           const hidl_vec<uint32_t>& cellStateOutDimensions,
                           const std::vector<int16_t>& cellStateOutValue,
                           const hidl_vec<uint32_t>& outputDimensions,
                           const std::vector<uint8_t>& outputValue)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::GpuAcc));
    using Model = typename HalPolicy::Model;
    Model model = {};

    float inputOutputScale = 0.0078125f;
    int32_t inputOutputOffset = 128;

    float cellStateScale = 0.00048828125f;
    int32_t cellStateOffset = 0;

    float weightsScale = 0.00408021f;
    int32_t weightsOffset = 100;

    float biasScale = 3.1876640625e-05f;
    int32_t biasOffset = 0;
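
    // For reference: a quantized value q represents scale * (q - zeroPoint), so with the
    // input/output parameters above a stored byte of 192 corresponds to
    // 0.0078125f * (192 - 128) = 0.5f.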

    // Inputs:
    // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
    //    specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
    AddInputOperand<HalPolicy>(model,
                               inputDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                               inputOutputScale,
                               inputOutputOffset);

    // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                inputToInputWeightsDimensions,
                                inputToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(inputToInputWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                inputToForgetWeightsDimensions,
                                inputToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(inputToForgetWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                inputToCellWeightsDimensions,
                                inputToCellWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(inputToCellWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                inputToOutputWeightsDimensions,
                                inputToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(inputToOutputWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToInputWeightsDimensions,
                                recurrentToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(recurrentToInputWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToForgetWeightsDimensions,
                                recurrentToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(recurrentToForgetWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToCellWeightsDimensions,
                                recurrentToCellWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(recurrentToCellWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToOutputWeightsDimensions,
                                recurrentToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                CreateNoValueLifeTime(recurrentToOutputWeightsDimensions),
                                weightsScale,
                                weightsOffset);
    // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
    //    bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //    of input and weights scales and zeroPoint equal to 0.
    AddTensorOperand<HalPolicy>(model,
                                inputGateBiasDimensions,
                                inputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(inputGateBiasDimensions),
                                biasScale,
                                biasOffset);
    // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    AddTensorOperand<HalPolicy>(model,
                                forgetGateBiasDimensions,
                                forgetGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(forgetGateBiasDimensions),
                                biasScale,
                                biasOffset);
    // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
    //     for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
    //     and weights scales and zeroPoint equal to 0.
    AddTensorOperand<HalPolicy>(model,
                                cellBiasDimensions,
                                cellBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(cellBiasDimensions),
                                biasScale,
                                biasOffset);
    // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    AddTensorOperand<HalPolicy>(model,
                                outputGateBiasDimensions,
                                outputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_INT32,
                                CreateNoValueLifeTime(outputGateBiasDimensions),
                                biasScale,
                                biasOffset);

    // 13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
    //     [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
    //     It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
    AddInputOperand<HalPolicy>(model,
                               previousCellStateInDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                               cellStateScale,
                               cellStateOffset);
    // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //     [numBatches, outputSize] specifying the output of the LSTM cell from the previous time step. Tensor
    //     is quantized with a fixed quantization range of -1, 127/128.
    AddInputOperand<HalPolicy>(model,
                               previousOutputInDimensions,
                               HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                               inputOutputScale,
                               inputOutputOffset);

    // Outputs:
    // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
    //    which contains a cell state from the current time step. Tensor is quantized using a quantization range
    //    of -2^4, 2^4 * 32767/32768.
    AddOutputOperand<HalPolicy>(model,
                                cellStateOutDimensions,
                                HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
                                cellStateScale,
                                cellStateOffset);
    // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
    //    contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
    AddOutputOperand<HalPolicy>(model,
                                outputDimensions,
                                HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
                                inputOutputScale,
                                inputOutputOffset);

    // make the lstm operation
    model.operations.resize(1);
    model.operations[0].type = HalPolicy::OperationType::QUANTIZED_16BIT_LSTM;

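    // The quantized LSTM consumes 15 inputs (indices 0-14) and produces 2 outputs (indices 15 and 16).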
    model.operations[0].inputs = hidl_vec<uint32_t> { 0,  1,  2,  3,  4,  5,  6,  7,
                                                      8,  9, 10, 11, 12, 13, 14};
    model.operations[0].outputs = hidl_vec<uint32_t> {15, 16};

    // define the input values
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    inputArguments[0] = CreateRequestArgument<uint8_t>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<int16_t>(previousCellStateInValue, 1);
    inputArguments[2] = CreateRequestArgument<uint8_t>(previousOutputInValue, 2);

    // define the expected output values
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(2);

    outputArguments[0] = CreateRequestArgument<int16_t>(cellStateOutValue, 3);
    outputArguments[1] = CreateRequestArgument<uint8_t>(outputValue, 4);

    V1_0::Request request = {};
    request.inputs  = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(previousCellStateInValue.size(), request, previousCellStateInValue.data());
    AddPoolAndSetData(previousOutputInValue.size(), request, previousOutputInValue.data());

    // add memory for the outputs
    android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData<int16_t>(cellStateOutValue.size(), request);
    int16_t* cellStateOutData = static_cast<int16_t*>(static_cast<void*>(cellStateOutMemory->getPointer()));
    android::sp<IMemory> outputMemory = AddPoolAndGetData<uint8_t>(outputValue.size(), request);
    uint8_t* outputData = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    ExecuteModel(model, *driver, request);

    // check the results
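    // (The comparison is done on the raw quantized values, so a much looser tolerance of 1.0f
    // is used here than for the float LSTM tests above.)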
    for (size_t i = 0; i < cellStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i], 1.0f),
                   "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
    }
    for (size_t i = 0; i < outputValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i], 1.0f),
                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
    }
}

template <typename HalPolicy>
void LstmNoCifgNoPeepholeNoProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).

    uint32_t batchSize = 1;
    uint32_t inputSize = 2;
    uint32_t numUnits = 4;
    uint32_t outputSize = numUnits;

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{2.0f, 3.0f};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToInputWeightsValue{-0.45018822f, -0.02338299f,
                                                -0.08705890f, -0.34550029f,
                                                 0.04266912f, -0.15680569f,
                                                -0.34856534f,  0.43890524f};
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{ 0.09701663f,  0.20334584f,
                                                 -0.50592935f, -0.31343272f,
                                                 -0.40032279f,  0.44781327f,
                                                  0.01387155f, -0.35593212f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.50013041f,  0.13702840f,
                                                0.11810488f,  0.20131630f,
                                               -0.20583314f,  0.44344562f,
                                                0.22077113f, -0.29909778f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{-0.25065863f, -0.28290087f,
                                                  0.04613829f,  0.40525138f,
                                                  0.44272184f,  0.03897077f,
                                                 -0.15568960f,  0.19487578f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToInputWeightsValue{-0.00635350f, -0.20423880f,  0.31454784f, -0.35746509f,
                                                     0.28902304f,  0.08183324f, -0.16555229f,  0.02286911f,
                                                    -0.13566875f,  0.03034258f,  0.48091322f, -0.12528998f,
                                                     0.24077177f, -0.51332325f, -0.33502164f,  0.10629296f};
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.48684245f, -0.06655136f,  0.42224967f,  0.21126390f,
                                                      0.27654213f,  0.20864892f, -0.07646349f,  0.45877004f,
                                                      0.00141793f, -0.14609534f,  0.36447752f,  0.09196436f,
                                                      0.28053468f,  0.01560611f, -0.20127171f, -0.01140004f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{-0.34074140f,  0.24443203f, -0.20785320f,  0.26320225f,
                                                    0.05695659f, -0.00123841f, -0.47447860f, -0.35869038f,
                                                   -0.06418842f, -0.13502428f, -0.50176400f,  0.22830659f,
                                                   -0.46367589f,  0.26016325f, -0.03894562f, -0.16368064f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{ 0.43385774f, -0.17194885f,  0.27182370f,  0.09215671f,
                                                      0.24107647f, -0.39835793f,  0.18212086f,  0.01301402f,
                                                      0.48572797f, -0.50656658f,  0.20047462f, -0.20607421f,
                                                     -0.51818722f, -0.15390486f,  0.04681480f,  0.39922136f};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<float> cellToForgetWeightsValue;
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<float> cellToOutputWeightsValue;
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
    std::vector<float> inputGateBiasValue(numUnits, 0.0f);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue(numUnits, 0.0f);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue(numUnits, 0.0f);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    hidl_vec<uint32_t> projectionWeightsDimensions{0};
    std::vector<float> projectionWeightsValue;
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<float> projectionBiasValue;

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4};
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f};

    // Normalization:
    // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at input gate.
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
    std::vector<float> inputLayerNormWeightsValue;
    // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at forget gate.
    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
    std::vector<float> forgetLayerNormWeightsValue;
    // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at cell gate.
    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
    std::vector<float> cellLayerNormWeightsValue;
    // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at output gate.
    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
    std::vector<float> outputLayerNormWeightsValue;

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    //    HOWEVER, looking at the code, it seems to be the opposite: (cifg ? 3 : 4) * numUnits.
    //    Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //              android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //              tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
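    //    In this test CIFG is not used, so the scratch buffer comes out as [batchSize, numUnits * 4] = [1, 16].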
825 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
826 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100827 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000828 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
telsoa01ce3e84a2018-08-31 09:31:35 +0100829 std::vector<float> outputStateOutValue {-0.0297319f, 0.122947f, 0.208851f, -0.153588f};
830 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000831 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
telsoa01ce3e84a2018-08-31 09:31:35 +0100832 std::vector<float> cellStateOutValue {-0.145439f, 0.157475f, 0.293663f, -0.277353f};
833 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
834 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +0000835 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
telsoa01ce3e84a2018-08-31 09:31:35 +0100836 std::vector<float> outputValue {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f};
837
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100838 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
839 inputToInputWeightsDimensions, inputToInputWeightsValue,
840 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
841 inputToCellWeightsDimensions, inputToCellWeightsValue,
842 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
843 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
844 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
845 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
846 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
847 cellToInputWeightsDimensions, cellToInputWeightsValue,
848 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
849 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
850 inputGateBiasDimensions, inputGateBiasValue,
851 forgetGateBiasDimensions, forgetGateBiasValue,
852 cellBiasDimensions, cellBiasValue,
853 outputGateBiasDimensions, outputGateBiasValue,
854 projectionWeightsDimensions, projectionWeightsValue,
855 projectionBiasDimensions, projectionBiasValue,
856 outputStateInDimensions, outputStateInValue,
857 cellStateInDimensions, cellStateInValue,
858 activationFunctionDimensions, activationFunctionValue,
859 cellClippingThresholdDimensions, cellClippingThresholdValue,
860 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
861 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
862 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
863 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
864 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
865 scratchBufferDimensions, scratchBufferValue,
866 outputStateOutDimensions, outputStateOutValue,
867 cellStateOutDimensions, cellStateOutValue,
868 outputDimensions, outputValue,
869 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +0100870}
871
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100872template <typename HalPolicy>
Matteo Martincighc7434122018-11-14 12:27:04 +0000873void LstmCifgPeepholeNoProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +0100874{
875 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
876 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
877 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
878
Matteo Martincighc7434122018-11-14 12:27:04 +0000879 uint32_t batchSize = 1;
880 uint32_t inputSize = 2;
881 uint32_t numUnits = 4;
882 uint32_t outputSize = numUnits;
883
telsoa01ce3e84a2018-08-31 09:31:35 +0100884 // Inputs:
885 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
886 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +0000887 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
888 std::vector<float> inputValue{2.0f, 3.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100889
890 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
891 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +0000892 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
893 std::vector<float> inputToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100894 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
895 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000896 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
897 std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
898 0.13056988f, -0.36333650f,
899 -0.22755712f, 0.28253698f,
900 0.24407166f, 0.33826375f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100901 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000902 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
903 std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
904 -0.09624726f, 0.05100781f,
905 0.04717243f, 0.48944736f,
906 -0.38535351f, -0.17212132f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100907 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
908 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000909 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
910 std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
911 -0.55932593f, -0.09426838f,
912 -0.44257352f, 0.54939759f,
913 0.01533556f, 0.42751634f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100914 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
915 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
916 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +0000917 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
918 std::vector<float> recurrentToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100919 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
920 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000921 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
922 std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
923 -0.14340827f, 0.36986142f, 0.23414481f, 0.55899000f,
924 0.10798943f, -0.41174671f, 0.17751795f, -0.34484994f,
925 -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100926 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
927 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000928 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
929 std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
930 0.42957711f, 0.01841056f, -0.32764608f, -0.33027974f,
931 -0.10826075f, 0.20675004f, 0.19069612f, -0.03026325f,
932 -0.54532051f, 0.33003211f, 0.44901288f, 0.21193194f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100933 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
934 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000935 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
936 std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
937 0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
938 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
939 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100940 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000941 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
942 std::vector<float> cellToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100943 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000944 hidl_vec<uint32_t> cellToForgetWeightsDimensions{4};
945 std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100946 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000947 hidl_vec<uint32_t> cellToOutputWeightsDimensions{4};
948 std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100949 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> inputGateBiasDimensions{0}; // {4} in the VTS model; left empty (no value) here because CIFG omits the input gate
951 std::vector<float> inputGateBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100952 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000953 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
954 std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100955 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000956 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
957 std::vector<float> cellBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100958 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000959 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
960 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100961 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
962 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000963 hidl_vec<uint32_t> projectionWeightsDimensions{0};
964 std::vector<float> projectionWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100965 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000966 hidl_vec<uint32_t> projectionBiasDimensions{0};
967 std::vector<float> projectionBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100968
969 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000970 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
971 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100972 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000973 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
974 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
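    // Both state inputs are zero-initialised, i.e. the cell starts from a reset state.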
telsoa01ce3e84a2018-08-31 09:31:35 +0100975
Matteo Martincighc7434122018-11-14 12:27:04 +0000976 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +0100977 // 20: The activation function: A value indicating the activation function:
978 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +0000979 hidl_vec<uint32_t> activationFunctionDimensions{};
980 std::vector<int32_t> activationFunctionValue{4};
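    // The value 4 selects Tanh as the gate output activation (see the mapping above).
    // A minimal sketch of how such an NNAPI activation code could be translated into an
    // ArmNN activation function (illustrative helper only, not part of this test):
    //
    //     armnn::ActivationFunction ToArmnnActivation(int32_t code)
    //     {
    //         switch (code)
    //         {
    //             case 1:  return armnn::ActivationFunction::ReLu;
    //             case 3:  return armnn::ActivationFunction::BoundedReLu; // Relu6 (bounded at 6)
    //             case 4:  return armnn::ActivationFunction::TanH;
    //             case 6:  return armnn::ActivationFunction::Sigmoid;
    //             default: return armnn::ActivationFunction::Linear;      // 0: None
    //         }
    //     }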
    // 21: The clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip].
982 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000983 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
984 std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold for the output from the projection layer, such that values are bound within
986 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000987 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
988 std::vector<float> projectionClippingThresholdValue{0.0f};
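    // Both thresholds are 0.0f, so clipping is disabled in this test. When a threshold t is
    // non-zero, the runtime conceptually clamps each affected value v as:
    //
    //     v = std::min(std::max(v, -t), t);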
telsoa01ce3e84a2018-08-31 09:31:35 +0100989
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100990 // Normalization:
    // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
992 // Used to rescale normalized inputs to activation at input gate.
993 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
994 std::vector<float> inputLayerNormWeightsValue;
    // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
996 // Used to rescale normalized inputs to activation at forget gate.
997 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
998 std::vector<float> forgetLayerNormWeightsValue;
    // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1000 // Used to rescale normalized inputs to activation at cell gate.
1001 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1002 std::vector<float> cellLayerNormWeightsValue;
    // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1004 // Used to rescale normalized inputs to activation at output gate.
1005 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1006 std::vector<float> outputLayerNormWeightsValue;
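    // Operands 23-26 are the layer-normalization weights introduced with the HAL 1.2 LSTM.
    // They are left empty here because this test replicates the HAL 1.0 lstm2 model, so layer
    // normalization is not exercised. (Assumption: empty dimensions mark these optional
    // operands as having no value, as for the other optional inputs above.)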
1007
telsoa01ce3e84a2018-08-31 09:31:35 +01001008 // Outputs:
1009 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1010 // CIFG, or [batch_size, num_units * 3] without CIFG.
    //    However, the reference implementation actually allocates the opposite, i.e. (cifg ? 3 : 4) * numUnits.
1012 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1013 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1014 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1015 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
1016 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
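    // Sketch of the sizing rule described above, assuming the (cifg ? 3 : 4) * numUnits
    // behaviour of the reference implementation (this test enables CIFG, hence the factor 3):
    //
    //     const bool cifgEnabled = true;
    //     const uint32_t scratchColumns = (cifgEnabled ? 3u : 4u) * numUnits;
    //     // scratch buffer shape == [batchSize, scratchColumns] == [1, 12] here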
telsoa01ce3e84a2018-08-31 09:31:35 +01001017 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001018 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1019 std::vector<float> outputStateOutValue{-0.364445f, -0.00352185f, 0.128866f, -0.0516365f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001020 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001021 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1022 std::vector<float> cellStateOutValue{-0.760444f, -0.0180416f, 0.182264f, -0.0649371f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001023 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1024 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +00001025 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1026 std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001027
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001028 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1029 inputToInputWeightsDimensions, inputToInputWeightsValue,
1030 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1031 inputToCellWeightsDimensions, inputToCellWeightsValue,
1032 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1033 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1034 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1035 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1036 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1037 cellToInputWeightsDimensions, cellToInputWeightsValue,
1038 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1039 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1040 inputGateBiasDimensions, inputGateBiasValue,
1041 forgetGateBiasDimensions, forgetGateBiasValue,
1042 cellBiasDimensions, cellBiasValue,
1043 outputGateBiasDimensions, outputGateBiasValue,
1044 projectionWeightsDimensions, projectionWeightsValue,
1045 projectionBiasDimensions, projectionBiasValue,
1046 outputStateInDimensions, outputStateInValue,
1047 cellStateInDimensions, cellStateInValue,
1048 activationFunctionDimensions, activationFunctionValue,
1049 cellClippingThresholdDimensions, cellClippingThresholdValue,
1050 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1051 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1052 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1053 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1054 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1055 scratchBufferDimensions, scratchBufferValue,
1056 outputStateOutDimensions, outputStateOutValue,
1057 cellStateOutDimensions, cellStateOutValue,
1058 outputDimensions, outputValue,
1059 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +01001060}
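// Illustrative usage only - the concrete test cases live elsewhere, and the exact HalPolicy
// alias used below is an assumption:
//
//     BOOST_AUTO_TEST_CASE(LstmCifgPeepholeNoProjectionCpuRef)
//     {
//         LstmCifgPeepholeNoProjection<armnn_driver::hal_1_0::HalPolicy>(armnn::Compute::CpuRef);
//     }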
1061
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001062template <typename HalPolicy>
Matteo Martincighc7434122018-11-14 12:27:04 +00001063void LstmNoCifgPeepholeProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +01001064{
1065 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm3.model.cpp
1066 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm3.example.cpp
1067 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
1068
Matteo Martincighc7434122018-11-14 12:27:04 +00001069 uint32_t batchSize = 2;
1070 uint32_t inputSize = 5;
1071 uint32_t numUnits = 20;
1072 uint32_t outputSize = 16;
1073
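    // This variant exercises the full LSTM path: CIFG is disabled (the input-gate operands
    // 01, 05, 09 and 12 are all populated), peephole connections are enabled, and a projection
    // layer is present. Note that outputSize (16) differs from numUnits (20): the projection
    // weights below have shape [output_size, num_units] = [16, 20], so the 20 cell activations
    // are projected down to a 16-element output / recurrent state.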
telsoa01ce3e84a2018-08-31 09:31:35 +01001074 // Inputs:
1075 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1076 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +00001077 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1078 std::vector<float> inputValue{0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
1079 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f};
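    // Two batches of five values each, laid out row-major as [batchSize, inputSize] = [2, 5].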
telsoa01ce3e84a2018-08-31 09:31:35 +01001080
1081 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1082 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +00001083 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
1084 std::vector<float> inputToInputWeightsValue
1085 {
1086 0.0213936830f, 0.0612455100f, 0.0469051670f, -0.0146576770f, -0.0314946300f,
1087 0.0917180300f, 0.1464780100f, 0.1079719300f, -0.0057968358f, 0.0019193048f,
1088 -0.2726754000f, 0.1015402900f, -0.0185398850f, 0.0803498850f, -0.1026238500f,
1089 -0.0225997870f, -0.0912115500f, -0.0086759670f, -0.0452061030f, -0.0821282000f,
1090 -0.0080459520f, 0.0154780810f, 0.0552172470f, 0.0387195870f, 0.0441536270f,
1091 -0.0645324300f, 0.0503182500f, -0.0469351080f, -0.0081644309f, 0.0145742260f,
1092 -0.1671009000f, -0.1551955200f, -0.1681979700f, -0.1397126900f, -0.1195305900f,
1093 0.2500548700f, -0.2279098300f, 0.0098550870f, -0.0281409580f, -0.1120069800f,
1094 0.1129540800f, -0.0035217577f, 0.0544850750f, 0.0518469500f, 0.0647112060f,
1095 0.1098919300f, 0.1167478600f, 0.0349060700f, 0.0772735700f, 0.1139058500f,
1096 -0.1863375000f, -0.1034451000f, -0.1394518900f, -0.0494012270f, -0.1876706300f,
1097 0.0424839030f, 0.1423355200f, 0.1383258100f, 0.1835016500f, 0.1454560300f,
1098 -0.0285457040f, 0.0249395310f, 0.0509297180f, 0.0076203286f, -0.0029723682f,
1099 -0.0424842240f, -0.1182759600f, -0.0917110400f, -0.1080862800f, -0.1632798800f,
1100 -0.2273378000f, -0.0993647000f, -0.0171551070f, 0.0023917493f, 0.0492727640f,
1101 0.0038534778f, 0.0547645050f, 0.0897537840f, 0.0694723400f, 0.0801447600f,
1102 -0.0454423400f, -0.0497073000f, -0.0713563100f, -0.0489291060f, -0.0040420120f,
1103 -0.0092840260f, 0.0180420540f, 0.0036860977f, -0.0742730200f, -0.1143460400f,
1104 -0.0189954560f, 0.0314875430f, 0.0128349080f, 0.0199777540f, 0.0442566540f,
1105 -0.3929261300f, -0.1851933400f, -0.1165128100f, -0.0680989200f, 0.0113736770f
1106 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001107 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1108 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001109 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
1110 std::vector<float> inputToForgetWeightsValue
1111 {
1112 -0.0018401089f, -0.0048522370f, 0.0369842400f, 0.0141817040f, 0.0282732360f,
1113 -0.0167261940f, -0.0524975900f, -0.1020426100f, 0.0086106600f, -0.0409795050f,
1114 -0.0098991870f, 0.0192389200f, -0.0281772690f, -0.0853510300f, -0.1458549500f,
1115 0.1066256700f, -0.0190973100f, -0.0178835340f, -0.0047269356f, -0.0451033230f,
1116 0.0030784295f, 0.0767847750f, 0.0746369600f, 0.0945313950f, 0.0814421000f,
1117 -0.1225789900f, -0.0339457580f, -0.0313034650f, 0.0456306260f, 0.0684388700f,
1118 -0.1349294500f, -0.0124800070f, -0.0811829000f, -0.0722449900f, -0.0962879100f,
1119 0.0451009460f, 0.0012300825f, 0.0139646620f, 0.0993723940f, 0.0254305900f,
1120 0.0695832400f, 0.0342572960f, 0.0482646000f, 0.0626799700f, 0.0526250680f,
1121 0.1278466600f, 0.0707789700f, 0.0257259350f, 0.0416500900f, 0.0724190500f,
1122 0.0186686440f, -0.0373772940f, -0.0627778300f, -0.0883363600f, -0.0401206050f,
1123 -0.0114055860f, -0.0078083350f, -0.0103013860f, -0.0051021670f, 0.0277174640f,
1124 0.0548342300f, 0.1144911100f, 0.1128965200f, 0.1093983900f, 0.1339650600f,
1125 -0.0840216600f, -0.0190146200f, -0.0446783040f, -0.0772056500f, 0.0143500630f,
1126 -0.1175795800f, -0.0652038000f, -0.0818573300f, -0.0767543240f, -0.0926143750f,
1127 0.1040549100f, 0.0529603360f, 0.0357558950f, 0.0358393860f, -0.0125405530f,
1128 0.0368812980f, 0.0291337600f, 0.0342015900f, 0.0544844700f, -0.0545233530f,
1129 0.0258271500f, 0.0232735500f, -0.0118571790f, -0.0011980024f, -0.0346417170f,
1130 -0.0261250940f, -0.1758261500f, -0.1592365700f, -0.2748677400f, -0.0006143371f,
1131 0.0001771948f, -8.470171e-05f, 0.0265180700f, 0.0457907650f, 0.069564960f
1132 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001133 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001134 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
1135 std::vector<float> inputToCellWeightsValue
1136 {
1137 -0.0458028300f, -0.0954946200f, -0.0324189850f, -0.0645463300f, -0.0435284530f,
1138 0.0430185870f, -0.0491523440f, -0.1241814400f, -0.0789854750f, -0.0759688900f,
1139 0.0194843620f, -0.1143496200f, -0.0074034138f, -0.0631484400f, -0.0929814950f,
1140 0.0062155537f, -0.0250343380f, -0.0028890965f, 0.0489295270f, 0.0623507500f,
1141 0.1066591800f, -0.0320367920f, -0.0850591600f, -0.1084335800f, -0.1300243300f,
1142 -0.0368164370f, -0.0213013400f, -0.0165182390f, 0.0047691227f, -0.0025825808f,
1143 0.0660178660f, 0.0299915340f, -0.1065283600f, -0.1037554000f, -0.1305607100f,
1144 -0.0326664300f, -0.0337024140f, -0.0064734240f, -0.0461169200f, 0.0144193390f,
1145 -0.0251743230f, 0.0396852000f, 0.0817775060f, 0.0615746800f, 0.1021009500f,
1146 -0.0096581940f, 0.0465117170f, 0.0360390600f, 0.0069369148f, 0.0159600950f,
1147 -0.0650766600f, 0.0955159800f, 0.0535688360f, 0.0640871400f, 0.1283566700f,
1148 -0.0087143290f, -0.2021196600f, -0.1209367400f, 0.0294504720f, 0.2849013000f,
1149 -0.0292279010f, 0.1164364000f, -0.0856026300f, 0.0994178600f, -0.0369995650f,
1150 -0.0288426260f, -0.0033637602f, -0.0170129020f, -0.0972086500f, -0.1119335100f,
1151 -0.0291551170f, -0.0179360340f, -0.0097689360f, -0.0422332400f, -0.0361596350f,
1152 0.0650511200f, -0.0217428920f, -0.0233772120f, -0.0722136400f, -0.0643055200f,
1153 0.0545386500f, 0.0911498140f, 0.0638733100f, 0.0075183930f, 0.0559609530f,
1154 0.0697793440f, 0.0464111680f, 0.1050991100f, 0.0746389400f, 0.0075130584f,
1155 0.0128509820f, 0.0455543100f, 0.0569556880f, 0.0655528500f, 0.0508014560f,
1156 -0.0098626830f, 0.0082677200f, -0.0265556090f, -0.0073611983f, -0.0014897042f
1157 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001158 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1159 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001160 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
1161 std::vector<float> inputToOutputWeightsValue
1162 {
1163 -0.0998932000f, -0.0720195600f, -0.0528037730f, -0.1562959300f, -0.1500191800f,
1164 -0.0765075100f, 0.0235985500f, -0.0751553550f, -0.0803770900f, -0.1509353400f,
1165 0.0295175520f, -0.0475139300f, 0.0103505310f, -0.0266485100f, -0.0168397220f,
1166 -0.0231211630f, 0.0077019283f, 0.0128512570f, -0.0504064900f, -0.0129761000f,
1167 -0.0217377470f, -0.0383057930f, -0.0687058600f, -0.0148124700f, -0.0012853940f,
1168 0.1012423600f, 0.0831228350f, 0.0533130060f, -0.0622356460f, -0.0756371540f,
1169 -0.0278339030f, 0.0297749710f, 0.1130802000f, 0.0921890600f, 0.0950613500f,
1170 -0.0866657640f, -0.0371627060f, -0.0388809140f, -0.0358328450f, -0.0144815640f,
1171 -0.0982500300f, -0.1204856900f, -0.0976655860f, -0.0528763300f, -0.0964047000f,
1172 -0.1136642900f, 0.0357775050f, 0.1356881900f, 0.0524513830f, 0.0506493040f,
1173 0.0579895100f, -0.0218523350f, -0.0998488440f, 0.0147404750f, -0.0788979460f,
1174 0.0497469900f, 0.0141604730f, 0.0697393200f, 0.0496494200f, 0.0333646460f,
1175 0.0819012400f, 0.0255353670f, 0.0508931650f, 0.0485142540f, 0.0694581300f,
1176 -0.0789075640f, -0.0670761600f, -0.1184450800f, -0.0998668800f, -0.0750940300f,
1177 0.0626322600f, 0.1492558700f, 0.2018843600f, 0.1209845100f, 0.1463941500f,
1178 0.0015017595f, -0.0142673820f, -0.0341725700f, 0.0127114680f, 0.0028300495f,
1179 -0.0247584820f, -0.0509854800f, -0.0821182000f, 0.0142256720f, 0.0215441580f,
1180 0.0894972500f, 0.0750526800f, -0.0020780868f, 0.0490825800f, 0.0647629500f,
1181 -0.0229070630f, 0.0275624560f, 0.0401857350f, 0.0195675770f, -0.0155987390f,
1182 -0.0490973030f, -0.0171218660f, -0.0833682340f, -0.0233200200f, -0.084095600f
1183 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001184 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1185 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1186 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +00001187 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
1188 std::vector<float> recurrentToInputWeightsValue
1189 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001190 -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f, // 00
1191 -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
1192 -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
1193 -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001194 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f, // 01
1195 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001196 -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001197 0.14283475f, -0.07390571f, -0.06402044f, 0.062524505f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001198 -0.093129106f, 0.04860203f, -0.08364217f, -0.08119002f, // 02
Matteo Martincighc7434122018-11-14 12:27:04 +00001199 0.009352075f, 0.22920375f, 0.0016303885f, 0.11583097f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001200 -0.13732095f, 0.012405723f, -0.07551853f, 0.06343048f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001201 0.12162708f, -0.031923793f, -0.014335606f, 0.01790974f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001202 -0.10650317f, -0.0724401f, 0.08554849f, -0.05727212f, // 03
Matteo Martincighc7434122018-11-14 12:27:04 +00001203 0.06556731f, -0.042729504f, -0.043227166f, 0.011683251f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001204 -0.013082158f, -0.029302018f, -0.010899579f, -0.062036745f,
1205 -0.022509435f, -0.00964907f, -0.01567329f, 0.04260106f,
1206 -0.07787477f, -0.11576462f, 0.017356863f, 0.048673786f, // 04
1207 -0.017577527f, -0.05527947f, -0.082487635f, -0.040137455f,
1208 -0.10820036f, -0.04666372f, 0.022746278f, -0.07851417f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001209 0.01068115f, 0.032956902f, 0.022433773f, 0.0026891115f,
1210 0.08944216f, -0.0685835f, 0.010513544f, 0.07228705f, // 05
1211 0.02032331f, -0.059686817f, -0.0005566496f, -0.086984694f,
1212 0.040414046f, -0.1380399f, 0.094208956f, -0.05722982f,
1213 0.012092817f, -0.04989123f, -0.086576f, -0.003399834f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001214 -0.04696032f, -0.045747425f, 0.10091314f, 0.048676282f, // 06
1215 -0.029037097f, 0.031399418f, -0.0040285117f, 0.047237843f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001216 0.09504992f, 0.041799378f, -0.049185462f, -0.031518843f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001217 -0.10516937f, 0.026374253f, 0.10058866f, -0.0033195973f,
1218 -0.041975245f, 0.0073591834f, 0.0033782164f, -0.004325073f, // 07
1219 -0.10167381f, 0.042500053f, -0.01447153f, 0.06464186f,
1220 -0.017142897f, 0.03312627f, 0.009205989f, 0.024138335f,
1221 -0.011337001f, 0.035530265f, -0.010912711f, 0.0706555f,
1222 -0.005894094f, 0.051841937f, -0.1401738f, -0.02351249f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +00001223 0.0365468f, 0.07590991f, 0.08838724f, 0.021681072f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001224 -0.10086113f, 0.019608743f, -0.06195883f, 0.077335775f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001225 0.023646897f, -0.095322326f, 0.02233014f, 0.09756986f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001226 -0.048691444f, -0.009579111f, 0.07595467f, 0.11480546f, // 09
1227 -0.09801813f, 0.019894179f, 0.08502348f, 0.004032281f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001228 0.037211012f, 0.068537936f, -0.048005626f, -0.091520436f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001229 -0.028379958f, -0.01556313f, 0.06554592f, -0.045599163f,
1230 -0.01672207f, -0.020169014f, -0.011877351f, -0.20212261f, // 10
Matteo Martincighc7434122018-11-14 12:27:04 +00001231 0.010889619f, 0.0047078193f, 0.038385306f, 0.08540671f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001232 -0.017140968f, -0.0035865551f, 0.016678626f, 0.005633034f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001233 0.015963363f, 0.00871737f, 0.060130805f, 0.028611384f,
1234 0.10109069f, -0.015060172f, -0.07894427f, 0.06401885f, // 11
1235 0.011584063f, -0.024466386f, 0.0047652307f, -0.09041358f,
1236 0.030737216f, -0.0046374933f, 0.14215417f, -0.11823516f,
1237 0.019899689f, 0.006106124f, -0.027092824f, 0.0786356f,
1238 0.05052217f, -0.058925f, -0.011402121f, -0.024987547f, // 12
telsoa01ce3e84a2018-08-31 09:31:35 +01001239 -0.0013661642f, -0.06832946f, -0.015667673f, -0.1083353f,
1240 -0.00096863037f, -0.06988685f, -0.053350925f, -0.027275559f,
1241 -0.033664223f, -0.07978348f, -0.025200296f, -0.017207067f,
1242 -0.058403496f, -0.055697463f, 0.005798788f, 0.12965427f, // 13
1243 -0.062582195f, 0.0013350133f, -0.10482091f, 0.0379771f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001244 0.072521195f, -0.0029455067f, -0.13797039f, -0.03628521f,
1245 0.013806405f, -0.017858358f, -0.01008298f, -0.07700066f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001246 -0.017081132f, 0.019358726f, 0.0027079724f, 0.004635139f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +00001247 0.062634714f, -0.02338735f, -0.039547626f, -0.02050681f,
1248 0.03385117f, -0.083611414f, 0.002862572f, -0.09421313f,
1249 0.058618143f, -0.08598433f, 0.00972939f, 0.023867095f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001250 -0.053934585f, -0.023203006f, 0.07452513f, -0.048767887f, // 15
1251 -0.07314807f, -0.056307215f, -0.10433547f, -0.06440842f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001252 0.04328182f, 0.04389765f, -0.020006588f, -0.09076438f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001253 -0.11652589f, -0.021705797f, 0.03345259f, -0.010329105f,
1254 -0.025767034f, 0.013057034f, -0.07316461f, -0.10145612f, // 16
Matteo Martincighc7434122018-11-14 12:27:04 +00001255 0.06358255f, 0.18531723f, 0.07759293f, 0.12006465f,
1256 0.1305557f, 0.058638252f, -0.03393652f, 0.09622831f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001257 -0.16253184f, -2.4580743e-06f, 0.079869635f, -0.070196845f,
1258 -0.005644518f, 0.06857898f, -0.12598175f, -0.035084512f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +00001259 0.03156317f, -0.12794146f, -0.031963028f, 0.04692781f,
1260 0.030070418f, 0.0071660685f, -0.095516115f, -0.004643372f,
1261 0.040170413f, -0.062104587f, -0.0037324072f, 0.0554317f,
1262 0.08184801f, -0.019164372f, 0.06791302f, 0.034257166f, // 18
telsoa01ce3e84a2018-08-31 09:31:35 +01001263 -0.10307039f, 0.021943003f, 0.046745934f, 0.0790918f,
1264 -0.0265588f, -0.007824208f, 0.042546265f, -0.00977924f,
1265 -0.0002440307f, -0.017384544f, -0.017990116f, 0.12252321f,
1266 -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f, // 19
Matteo Martincighc7434122018-11-14 12:27:04 +00001267 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001268 -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001269 -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f
1270 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001271 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1272 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001273 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
1274 std::vector<float> recurrentToForgetWeightsValue
1275 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001276 -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f, // 00
Matteo Martincighc7434122018-11-14 12:27:04 +00001277 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001278 -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001279 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
1280 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f, // 01
telsoa01ce3e84a2018-08-31 09:31:35 +01001281 -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
1282 -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001283 0.061878487f, -0.04729229f, 0.034919553f, -0.07585433f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001284 -0.04421272f, -0.044019096f, 0.085488975f, 0.04058006f, // 02
1285 -0.06890133f, -0.030951202f, -0.024628663f, -0.07672815f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001286 0.034293607f, 0.08556707f, -0.05293577f, -0.033561368f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001287 -0.04899627f, 0.0241671f, 0.015736353f, -0.095442444f,
1288 -0.029564252f, 0.016493602f, -0.035026584f, 0.022337519f, // 03
1289 -0.026871363f, 0.004780428f, 0.0077918363f, -0.03601621f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001290 0.016435321f, -0.03263031f, -0.09543275f, -0.047392778f,
1291 0.013454138f, 0.028934088f, 0.01685226f, -0.086110644f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001292 -0.046250615f, -0.01847454f, 0.047608484f, 0.07339695f, // 04
Matteo Martincighc7434122018-11-14 12:27:04 +00001293 0.034546845f, -0.04881143f, 0.009128804f, -0.08802852f,
1294 0.03761666f, 0.008096139f, -0.014454086f, 0.014361001f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001295 -0.023502491f, -0.0011840804f, -0.07607001f, 0.001856849f,
1296 -0.06509276f, -0.006021153f, -0.08570962f, -0.1451793f, // 05
Matteo Martincighc7434122018-11-14 12:27:04 +00001297 0.060212336f, 0.055259194f, 0.06974018f, 0.049454916f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001298 -0.027794661f, -0.08077226f, -0.016179763f, 0.1169753f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001299 0.17213494f, -0.0056326236f, -0.053934924f, -0.0124349f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001300 -0.11520337f, 0.05409887f, 0.088759385f, 0.0019655675f, // 06
Matteo Martincighc7434122018-11-14 12:27:04 +00001301 0.0042065294f, 0.03881498f, 0.019844765f, 0.041858196f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001302 -0.05695512f, 0.047233116f, 0.038937137f, -0.06542224f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001303 0.014429736f, -0.09719407f, 0.13908425f, -0.05379757f,
1304 0.012321099f, 0.082840554f, -0.029899208f, 0.044217527f, // 07
1305 0.059855383f, 0.07711018f, -0.045319796f, 0.0948846f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001306 -0.011724666f, -0.0033288454f, -0.033542685f, -0.04764985f,
1307 -0.13873616f, 0.040668588f, 0.034832682f, -0.015319203f,
1308 -0.018715994f, 0.046002675f, 0.0599172f, -0.043107376f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +00001309 0.0294216f, -0.002314414f, -0.022424703f, 0.0030315618f,
1310 0.0014641669f, 0.0029166266f, -0.11878115f, 0.013738511f,
1311 0.12375372f, -0.0006038222f, 0.029104086f, 0.087442465f,
1312 0.052958444f, 0.07558703f, 0.04817258f, 0.044462286f, // 09
telsoa01ce3e84a2018-08-31 09:31:35 +01001313 -0.015213451f, -0.08783778f, -0.0561384f, -0.003008196f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001314 0.047060397f, -0.002058388f, 0.03429439f, -0.018839769f,
1315 0.024734668f, 0.024614193f, -0.042046934f, 0.09597743f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001316 -0.0043254104f, 0.04320769f, 0.0064070094f, -0.0019131786f, // 10
1317 -0.02558259f, -0.022822596f, -0.023273505f, -0.02464396f,
1318 -0.10991725f, -0.006240552f, 0.0074488563f, 0.024044557f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001319 0.04383914f, -0.046476185f, 0.028658995f, 0.060410924f,
1320 0.050786525f, 0.009452605f, -0.0073054377f, -0.024810238f, // 11
1321 0.0052906186f, 0.0066939713f, -0.0020913032f, 0.014515517f,
1322 0.015898481f, 0.021362653f, -0.030262267f, 0.016587038f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001323 -0.011442813f, 0.041154444f, -0.007631438f, -0.03423484f,
1324 -0.010977775f, 0.036152758f, 0.0066366293f, 0.11915515f, // 12
Matteo Martincighc7434122018-11-14 12:27:04 +00001325 0.02318443f, -0.041350313f, 0.021485701f, -0.10906167f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001326 -0.028218046f, -0.00954771f, 0.020531068f, -0.11995105f,
1327 -0.03672871f, 0.024019798f, 0.014255957f, -0.05221243f,
1328 -0.00661567f, -0.04630967f, 0.033188973f, 0.10107534f, // 13
1329 -0.014027541f, 0.030796422f, -0.10270911f, -0.035999842f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001330 0.15443139f, 0.07684145f, 0.036571592f, -0.035900835f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001331 -0.0034699554f, 0.06209149f, 0.015920248f, -0.031122351f,
1332 -0.03858649f, 0.01849943f, 0.13872518f, 0.01503974f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +00001333 0.069941424f, -0.06948533f, -0.0088794185f, 0.061282158f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001334 -0.047401894f, 0.03100163f, -0.041533746f, -0.10430945f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001335 0.044574402f, -0.01425562f, -0.024290353f, 0.034563623f,
1336 0.05866852f, 0.023947537f, -0.09445152f, 0.035450947f, // 15
1337 0.02247216f, -0.0042998926f, 0.061146557f, -0.10250651f,
1338 0.020881841f, -0.06747029f, 0.10062043f, -0.0023941975f,
1339 0.03532124f, -0.016341697f, 0.09685456f, -0.016764693f,
1340 0.051808182f, 0.05875331f, -0.04536488f, 0.001626336f, // 16
telsoa01ce3e84a2018-08-31 09:31:35 +01001341 -0.028892258f, -0.01048663f, -0.009793449f, -0.017093895f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001342 0.010987891f, 0.02357273f, -0.00010856845f, 0.0099760275f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001343 -0.001845119f, -0.03551521f, 0.0018358806f, 0.05763657f,
1344 -0.01769146f, 0.040995963f, 0.02235177f, -0.060430344f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +00001345 0.11475477f, -0.023854522f, 0.10071741f, 0.0686208f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001346 -0.014250481f, 0.034261297f, 0.047418304f, 0.08562733f,
1347 -0.030519066f, 0.0060542435f, 0.014653856f, -0.038836084f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001348 0.04096551f, 0.032249358f, -0.08355519f, -0.026823482f, // 18
1349 0.056386515f, -0.010401743f, -0.028396193f, 0.08507674f,
1350 0.014410365f, 0.020995233f, 0.17040324f, 0.11511526f,
1351 0.02459721f, 0.0066619175f, 0.025853224f, -0.023133837f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001352 -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f, // 19
1353 -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001354 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f,
1355 -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f
1356 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001357 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1358 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001359 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
1360 std::vector<float> recurrentToCellWeightsValue
1361 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001362 -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001363 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
1364 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001365 -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001366 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
1367 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001368 -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
1369 -0.019443132f, -0.030755889f, -0.0040000007f, 0.04465846f,
1370 -0.021585021f, 0.0031670958f, 0.0053199246f, -0.056117613f,
1371 -0.10893326f, 0.076739706f, -0.08509834f, -0.027997585f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001372 0.037871376f, 0.01449768f, -0.09002357f, -0.06111149f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001373 -0.046195522f, 0.0422062f, -0.005683705f, -0.1253618f,
1374 -0.012925729f, -0.04890792f, 0.06985068f, 0.037654128f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001375 0.03398274f, -0.004781977f, 0.007032333f, -0.031787455f,
1376 0.010868644f, -0.031489216f, 0.09525667f, 0.013939797f,
1377 0.0058680447f, 0.0167067f, 0.02668468f, -0.04797466f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001378 -0.048885044f, -0.12722108f, 0.035304096f, 0.06554885f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001379 0.00972396f, -0.039238118f, -0.05159735f, -0.11329045f,
1380 0.1613692f, -0.03750952f, 0.06529313f, -0.071974665f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001381 -0.11769596f, 0.015524369f, -0.0013754242f, -0.12446318f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001382 0.02786344f, -0.014179351f, 0.005264273f, 0.14376344f,
1383 0.015983658f, 0.03406988f, -0.06939408f, 0.040699873f,
1384 0.02111075f, 0.09669095f, 0.041345075f, -0.08316494f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001385 -0.07684199f, -0.045768797f, 0.032298047f, -0.041805092f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001386 0.0119405f, 0.0061010392f, 0.12652606f, 0.0064572375f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001387 -0.024950314f, 0.11574242f, 0.04508852f, -0.04335324f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001388 0.06760663f, -0.027437469f, 0.07216407f, 0.06977076f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001389 -0.05438599f, 0.034033038f, -0.028602652f, 0.05346137f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001390 0.043184172f, -0.037189785f, 0.10420091f, 0.00882477f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001391 -0.054019816f, -0.074273005f, -0.030617684f, -0.0028467078f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001392 0.024302477f, -0.0038869337f, 0.005332455f, 0.0013399826f,
1393 0.04361412f, -0.007001822f, 0.09631092f, -0.06702025f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001394 -0.042049985f, -0.035070654f, -0.04103342f, -0.10273396f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001395 0.0544271f, 0.037184782f, -0.13150354f, -0.0058036847f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001396 -0.008264958f, 0.042035464f, 0.05891794f, 0.029673764f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001397 0.0063542654f, 0.044788733f, 0.054816857f, 0.062257513f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001398 -0.00093483756f, 0.048938446f, -0.004952862f, -0.007730018f,
1399 -0.04043371f, -0.017094059f, 0.07229206f, -0.023670016f,
1400 -0.052195564f, -0.025616996f, -0.01520939f, 0.045104615f,
1401 -0.007376126f, 0.003533447f, 0.006570588f, 0.056037236f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001402 0.12436656f, 0.051817212f, 0.028532185f, -0.08686856f,
1403 0.11868599f, 0.07663395f, -0.07323171f, 0.03463402f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001404 -0.050708205f, -0.04458982f, -0.11590894f, 0.021273347f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001405 0.1251325f, -0.15313013f, -0.12224372f, 0.17228661f,
1406 0.023029093f, 0.086124025f, 0.006445803f, -0.03496501f,
1407 0.028332196f, 0.04449512f, -0.042436164f, -0.026587414f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001408 -0.006041347f, -0.09292539f, -0.05678812f, 0.03897832f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001409 0.09465633f, 0.008115513f, -0.02171956f, 0.08304309f,
1410 0.071401566f, 0.019622514f, 0.032163795f, -0.004167056f,
1411 0.02295182f, 0.030739572f, 0.056506045f, 0.004612461f,
1412 0.06524936f, 0.059999723f, 0.046395954f, -0.0045512207f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001413 -0.1335546f, -0.030136576f, 0.11584653f, -0.014678886f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001414 0.0020118146f, -0.09688814f, -0.0790206f, 0.039770417f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001415 -0.0329582f, 0.07922767f, 0.029322514f, 0.026405897f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001416 0.04207835f, -0.07073373f, 0.063781224f, 0.0859677f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001417 -0.10925287f, -0.07011058f, 0.048005477f, 0.03438226f,
1418 -0.09606514f, -0.006669445f, -0.043381985f, 0.04240257f,
1419 -0.06955775f, -0.06769346f, 0.043903265f, -0.026784198f,
1420 -0.017840602f, 0.024307009f, -0.040079936f, -0.019946516f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001421 0.045318738f, -0.12233574f, 0.026170589f, 0.0074471775f,
1422 0.15978073f, 0.10185836f, 0.10298046f, -0.015476589f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001423 -0.039390966f, -0.072174534f, 0.0739445f, -0.1211869f,
1424 -0.0347889f, -0.07943156f, 0.014809798f, -0.12412325f,
1425 -0.0030663363f, 0.039695457f, 0.0647603f, -0.08291318f,
1426 -0.018529687f, -0.004423833f, 0.0037507233f, 0.084633216f,
1427 -0.01514876f, -0.056505352f, -0.012800942f, -0.06994386f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001428 0.012962922f, -0.031234352f, 0.07029052f, 0.016418684f,
1429 0.03618972f, 0.055686004f, -0.08663945f, -0.017404709f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001430 -0.054761406f, 0.029065743f, 0.052404847f, 0.020238016f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001431 0.0048197987f, -0.0214882f, 0.07078733f, 0.013016777f,
1432 0.06262858f, 0.009184685f, 0.020785125f, -0.043904778f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001433 -0.0270329f, -0.03299152f, -0.060088247f, -0.015162964f,
1434 -0.001828936f, 0.12642565f, -0.056757294f, 0.013586685f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001435 0.09232601f, -0.035886683f, 0.06000002f, 0.05229691f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001436 -0.052580316f, -0.082029596f, -0.010794592f, 0.012947712f,
1437 -0.036429964f, -0.085508935f, -0.13127148f, -0.017744139f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001438 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001439 -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f,
1440 -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001441 -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f
1442 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001443 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1444 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001445 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1446 std::vector<float> recurrentToOutputWeightsValue
1447 {
1448 0.025825322f, -0.05813119f, 0.09495884f, -0.045984812f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001449 -0.01255415f, -0.0026479573f, -0.08196161f, -0.054914974f,
1450 -0.0046604523f, -0.029587349f, -0.044576716f, -0.07480124f,
1451 -0.082868785f, 0.023254942f, 0.027502948f, -0.0039728214f,
1452 -0.08683098f, -0.08116779f, -0.014675607f, -0.037924774f,
1453 -0.023314456f, -0.007401714f, -0.09255757f, 0.029460307f,
1454 -0.08829125f, -0.005139627f, -0.08989442f, -0.0555066f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001455 0.13596267f, -0.025062224f, -0.048351806f, -0.03850004f,
1456 0.07266485f, -0.022414139f, 0.05940088f, 0.075114764f,
1457 0.09597592f, -0.010211725f, -0.0049794707f, -0.011523867f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001458 -0.025980417f, 0.072999895f, 0.11091378f, -0.081685916f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001459 0.014416728f, 0.043229222f, 0.034178585f, -0.07530371f,
1460 0.035837382f, -0.085607f, -0.007721233f, -0.03287832f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001461 -0.043848954f, -0.06404588f, -0.06632928f, -0.073643476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001462 0.008214239f, -0.045984086f, 0.039764922f, 0.03474462f,
1463 0.060612556f, -0.080590084f, 0.049127717f, 0.04151091f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001464 -0.030063879f, 0.008801774f, -0.023021035f, -0.019558564f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001465 0.05158114f, -0.010947698f, -0.011825728f, 0.0075720972f,
1466 0.0699727f, -0.0039981045f, 0.069350146f, 0.08799282f,
1467 0.016156472f, 0.035502106f, 0.11695009f, 0.006217345f,
1468 0.13392477f, -0.037875112f, 0.025745004f, 0.08940699f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001469 -0.00924166f, 0.0046702605f, -0.036598757f, -0.08811812f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001470 0.10522024f, -0.032441203f, 0.008176899f, -0.04454919f,
1471 0.07058152f, 0.0067963637f, 0.039206743f, 0.03259838f,
1472 0.03725492f, -0.09515802f, 0.013326398f, -0.052055415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001473 -0.025676316f, 0.03198509f, -0.015951829f, -0.058556724f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001474 0.036879618f, 0.043357447f, 0.028362012f, -0.05908629f,
1475 0.0059240665f, -0.04995891f, -0.019187413f, 0.0276265f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001476 -0.01628143f, 0.0025863599f, 0.08800015f, 0.035250366f,
1477 -0.022165963f, -0.07328642f, -0.009415526f, -0.07455109f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001478 0.11690406f, 0.0363299f, 0.07411125f, 0.042103454f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001479 -0.009660886f, 0.019076364f, 0.018299393f, -0.046004917f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001480 0.08891175f, 0.0431396f, -0.026327137f, -0.051502608f,
1481 0.08979574f, -0.051670972f, 0.04940282f, -0.07491107f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001482 -0.021240504f, 0.022596184f, -0.034280192f, 0.060163025f,
1483 -0.058211457f, -0.051837247f, -0.01349775f, -0.04639988f,
1484 -0.035936575f, -0.011681591f, 0.064818054f, 0.0073146066f,
1485 -0.021745546f, -0.043124277f, -0.06471268f, -0.07053354f,
1486 -0.029321948f, -0.05330136f, 0.016933719f, -0.053782392f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001487 0.13747959f, -0.1361751f, -0.11569455f, 0.0033329215f,
1488 0.05693899f, -0.053219706f, 0.063698f, 0.07977434f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001489 -0.07924483f, 0.06936997f, 0.0034815092f, -0.007305279f,
1490 -0.037325785f, -0.07251102f, -0.033633437f, -0.08677009f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001491 0.091591336f, -0.14165086f, 0.021752775f, 0.019683983f,
1492 0.0011612234f, -0.058154266f, 0.049996935f, 0.0288841f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001493 -0.0024567875f, -0.14345716f, 0.010955264f, -0.10234828f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001494 0.1183656f, -0.0010731248f, -0.023590032f, -0.072285876f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001495 -0.0724771f, -0.026382286f, -0.0014920527f, 0.042667855f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001496 0.0018776858f, 0.02986552f, 0.009814309f, 0.0733756f,
1497 0.12289186f, 0.018043943f, -0.0458958f, 0.049412545f,
1498 0.033632483f, 0.05495232f, 0.036686596f, -0.013781798f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001499 -0.010036754f, 0.02576849f, -0.08307328f, 0.010112348f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001500 0.042521734f, -0.05869831f, -0.071689695f, 0.03876447f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001501 -0.13275425f, -0.0352966f, -0.023077697f, 0.10285965f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001502 0.084736146f, 0.15568255f, -0.00040734606f, 0.027835453f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001503 -0.10292561f, -0.032401145f, 0.10053256f, -0.026142767f,
1504 -0.08271222f, -0.0030240538f, -0.016368777f, 0.1070414f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001505 0.042672627f, 0.013456989f, -0.0437609f, -0.022309763f,
1506 0.11576483f, 0.04108048f, 0.061026827f, -0.0190714f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001507 -0.0869359f, 0.037901703f, 0.0610107f, 0.07202949f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001508 0.01675338f, 0.086139716f, -0.08795751f, -0.014898893f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001509 -0.023771819f, -0.01965048f, 0.007955471f, -0.043740474f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001510 0.03346837f, -0.10549954f, 0.090567775f, 0.042013682f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001511 -0.03176985f, 0.12569028f, -0.02421228f, -0.029526481f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001512 0.023851605f, 0.031539805f, 0.05292009f, -0.02344001f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001513 -0.07811758f, -0.08834428f, 0.10094801f, 0.16594367f,
1514 -0.06861939f, -0.021256343f, -0.041093912f, -0.06669611f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001515 0.035498552f, 0.021757556f, -0.09302526f, -0.015403468f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001516 -0.06614931f, -0.051798206f, -0.013874718f, 0.03630673f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001517 0.010412845f, -0.08077351f, 0.046185967f, 0.0035662893f,
1518 0.03541868f, -0.094149634f, -0.034814864f, 0.003128424f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001519 -0.020674974f, -0.03944324f, -0.008110165f, -0.11113267f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001520 0.08484226f, 0.043586485f, 0.040582247f, 0.0968012f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001521 -0.065249965f, -0.028036479f, 0.0050708856f, 0.0017462453f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001522 0.0326779f, 0.041296225f, 0.09164146f, -0.047743853f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001523 -0.015952192f, -0.034451712f, 0.084197424f, -0.05347844f,
1524 -0.11768019f, 0.085926116f, -0.08251791f, -0.045081906f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001525 0.0948852f, 0.068401024f, 0.024856757f, 0.06978981f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001526 -0.057309967f, -0.012775832f, -0.0032452994f, 0.01977615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001527 -0.041040014f, -0.024264973f, 0.063464895f, 0.05431621f
1528 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001529 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001530 hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
1531 std::vector<float> cellToInputWeightsValue
1532 {
1533 0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
1534 -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
1535 -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f, -0.052169047f,
1536 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f
1537 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001538 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001539 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1540 std::vector<float> cellToForgetWeightsValue
1541 {
1542 -0.01998659f, -0.15568835f, -0.24248174f, -0.012770197f, 0.041331276f,
1543 -0.072311886f, -0.052123554f, -0.0066330447f, -0.043891653f, 0.036225766f,
1544 -0.047248036f, 0.021479502f, 0.033189066f, 0.11952997f, -0.020432774f,
1545 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f
1546 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001547 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001548 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1549 std::vector<float> cellToOutputWeightsValue
1550 {
1551 0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
1552 -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
1553 -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
1554 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f
1555 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001556 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001557 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
1558 std::vector<float> inputGateBiasValue
1559 {
1560 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
1561 -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
1562 -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
1563 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f
1564 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001565 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001566 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1567 std::vector<float> forgetGateBiasValue
1568 {
1569 0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f, 0.11098921f,
1570 0.15378423f, 0.09263801f, 0.09790885f, 0.09508917f, 0.061199076f,
1571 0.07665568f, -0.015443159f, -0.03499149f, 0.046190713f, 0.08895977f,
1572 0.10899629f, 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f
1573 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001574 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001575 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1576 std::vector<float> cellBiasValue
1577 {
1578 -0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f, -0.1483596f,
1579 -0.10639995f, -0.091433935f, 0.058573797f, -0.06809782f, -0.07889636f,
1580 -0.043246906f, -0.09829136f, -0.4279842f, 0.034901652f, 0.18797937f,
1581 0.0075234566f, 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f
1582 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001583 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001584 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1585 std::vector<float> outputGateBiasValue
1586 {
1587 0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
1588 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
1589 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
1590 -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f
1591 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001592 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1593 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001594 hidl_vec<uint32_t> projectionWeightsDimensions{outputSize, numUnits};
1595 std::vector<float> projectionWeightsValue
1596 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001597 -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001598 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001599 -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
1600 -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001601 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
1602 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
1603 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
1604 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001605 -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
1606 -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
1607 -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001608 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
1609 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
1610 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
1611 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
1612 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001613 -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001614 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001615 -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001616 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001617 -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
1618 -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001619 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001620 -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001621 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001622 -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
1623 -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001624 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001625 -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
1626 -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
1627 -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001628 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
1629 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001630 -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001631 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
1632 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
1633 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
1634 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
1635 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001636 -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
1637 -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001638 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001639 -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
1640 -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001641 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
1642 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
1643 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001644 -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
1645 -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
1646 -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001647 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001648 -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001649 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
1650 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001651 -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
1652 -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
1653 -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001654 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001655 -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
1656 -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
1657 -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001658 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
1659 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
1660 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f
1661 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001662 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001663 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
1664 std::vector<float> projectionBiasValue(outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001665
1666 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001667 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1668 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001669 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001670 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1671 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001672
Matteo Martincighc7434122018-11-14 12:27:04 +00001673 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +01001674 // 20: The activation function: A value indicating the activation function:
1675 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +00001676 hidl_vec<uint32_t> activationFunctionDimensions{};
1677 std::vector<int32_t> activationFunctionValue{4};
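    // i.e. Tanh, per the mapping listed above.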
telsoa01ce3e84a2018-08-31 09:31:35 +01001678 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1679 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001680 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1681 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001682 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1683 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001684 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1685 std::vector<float> projectionClippingThresholdValue{0.0f};
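    // Both clipping thresholds are left at 0.0f here, i.e. clipping is disabled for this test (see the notes above).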
telsoa01ce3e84a2018-08-31 09:31:35 +01001686
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001687 // Normalization:
1688 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
1689 // Used to rescale normalized inputs to activation at input gate.
1690 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
1691 std::vector<float> inputLayerNormWeightsValue;
1692 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1693 // Used to rescale normalized inputs to activation at forget gate.
1694 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
1695 std::vector<float> forgetLayerNormWeightsValue;
1696 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1697 // Used to rescale normalized inputs to activation at cell gate.
1698 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1699 std::vector<float> cellLayerNormWeightsValue;
1700 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1701 // Used to rescale normalized inputs to activation at output gate.
1702 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1703 std::vector<float> outputLayerNormWeightsValue;
1704
telsoa01ce3e84a2018-08-31 09:31:35 +01001705 // Outputs:
1706 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1707 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +00001708 // HOWEVER, looking at the code, it appears to be the opposite: (cifg ? 3 : 4) * numUnits
1709 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1710 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1711 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1712 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
1713 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
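    // Sanity check of the rule above: this test does not use CIFG, so the scratch buffer holds
    // 4 * numUnits floats per batch; with the batchSize of 2 and numUnits of 20 used in this test
    // that is 2 * 80 = 160 zero-initialized floats.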
telsoa01ce3e84a2018-08-31 09:31:35 +01001714 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001715 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1716 std::vector<float> outputStateOutValue
1717 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001718 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835577f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001719 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415119f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001720 -0.013869f, 0.0287268f, -0.00334694f, 0.00733397f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001721 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.0216801f
1722 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001723 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001724 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1725 std::vector<float> cellStateOutValue
1726 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001727 -0.0531632f, -0.0118138f, 0.0870833f, 0.0347929f, -0.076144f,
1728 -0.0659219f, -0.0463811f, 0.0141307f, -0.0127706f, -0.03782f,
1729 -0.00402401f, -0.00571876f, -0.187957f, -0.0247127f, 0.0711425f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001730 0.008244f, 0.0492649f, 0.126972f, 0.0933097f, 0.29848f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001731 -0.0966178f, -0.114417f, 0.0387229f, 0.0453255f, -0.181286f,
1732 -0.0651251f, -0.0996879f, -0.00276995f, 0.0617558f, -0.0100728f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001733 0.056304f, -0.077416f, -0.162858f, -0.0541251f, 0.0571202f,
1734 -0.0525331f, 0.0724297f, 0.171029f, 0.141738f, 0.295483f
1735 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001736 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1737 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +00001738 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1739 std::vector<float> outputValue
1740 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001741 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001742 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001743 -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001744 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f
1745 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001746
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001747 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1748 inputToInputWeightsDimensions, inputToInputWeightsValue,
1749 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1750 inputToCellWeightsDimensions, inputToCellWeightsValue,
1751 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1752 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1753 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1754 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1755 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1756 cellToInputWeightsDimensions, cellToInputWeightsValue,
1757 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1758 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1759 inputGateBiasDimensions, inputGateBiasValue,
1760 forgetGateBiasDimensions, forgetGateBiasValue,
1761 cellBiasDimensions, cellBiasValue,
1762 outputGateBiasDimensions, outputGateBiasValue,
1763 projectionWeightsDimensions, projectionWeightsValue,
1764 projectionBiasDimensions, projectionBiasValue,
1765 outputStateInDimensions, outputStateInValue,
1766 cellStateInDimensions, cellStateInValue,
1767 activationFunctionDimensions, activationFunctionValue,
1768 cellClippingThresholdDimensions, cellClippingThresholdValue,
1769 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1770 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1771 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1772 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1773 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1774 scratchBufferDimensions, scratchBufferValue,
1775 outputStateOutDimensions, outputStateOutValue,
1776 cellStateOutDimensions, cellStateOutValue,
1777 outputDimensions, outputValue,
1778 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +01001779}
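
// Illustrative helper (not referenced by the tests in this file): the scratch buffer width rule noted
// in the comments above, expressed as code. The name LstmScratchBufferWidth is illustrative only and is
// not part of the VTS models or of LstmTestImpl.
inline uint32_t LstmScratchBufferWidth(bool cifgEnabled, uint32_t numUnits)
{
    // (cifg ? 3 : 4) * numUnits, as observed in android/frameworks/ml/nn/common/operations/LSTM.cpp
    return (cifgEnabled ? 3u : 4u) * numUnits;
}
// For example, LstmScratchBufferWidth(false, 20) == 80 for the test above, and
// LstmScratchBufferWidth(true, 4) == 12 for the CIFG test below.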
1780
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001781template <typename HalPolicy>
Matteo Martincighc7434122018-11-14 12:27:04 +00001782void LstmCifgPeepholeNoProjectionBatch2(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +01001783{
1784 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
1785 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
1786 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
1787 // The batch size has been increased to 2 (it was 1 in the VTS test) with appropriate input and output values added.
1788
1789 uint32_t batchSize = 2;
1790 uint32_t inputSize = 2;
1791 uint32_t numUnits = 4;
1792 uint32_t outputSize = numUnits;
1793
1794 // Inputs:
1795 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1796 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +00001797 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1798 std::vector<float> inputValue{2.0f, 3.0f, 3.0f, 4.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001799
1800 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1801 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +00001802 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
1803 std::vector<float> inputToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001804 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1805 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001806 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
1807 std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
1808 0.13056988f, -0.36333650f,
1809 -0.22755712f, 0.28253698f,
1810 0.24407166f, 0.33826375f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001811 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001812 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
1813 std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
1814 -0.09624726f, 0.05100781f,
1815 0.04717243f, 0.48944736f,
1816 -0.38535351f, -0.17212132f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001817 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1818 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001819 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
1820 std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
1821 -0.55932593f, -0.09426838f,
1822 -0.44257352f, 0.54939759f,
1823 0.01533556f, 0.42751634f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001824 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1825 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1826 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +00001827 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
1828 std::vector<float> recurrentToInputWeightsValue;
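    // Note: a dimension vector of {0} combined with an empty value vector is how these tests mark an
    // optional operand as omitted (it is created with lifetime NO_VALUE rather than CONSTANT_COPY),
    // which is why this CIFG variant leaves out the input-gate related weights and biases.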
telsoa01ce3e84a2018-08-31 09:31:35 +01001829 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1830 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001831 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
1832 std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
1833 -0.14340827f, 0.36986142f, 0.23414481f, 0.55899000f,
1834 0.10798943f, -0.41174671f, 0.17751795f, -0.34484994f,
1835 -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001836 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1837 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001838 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
1839 std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
1840 0.42957711f, 0.01841056f, -0.32764608f, -0.33027974f,
1841 -0.10826075f, 0.20675004f, 0.19069612f, -0.03026325f,
1842 -0.54532051f, 0.33003211f, 0.44901288f, 0.21193194f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001843 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1844 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001845 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1846 std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
1847 0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
1848 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
1849 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001850 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001851 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
1852 std::vector<float> cellToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001853 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001854 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1855 std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001856 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001857 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1858 std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001859 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001860 hidl_vec<uint32_t> inputGateBiasDimensions{0}; // VTS was {4} -> {0} ?
1861 std::vector<float> inputGateBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001862 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001863 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1864 std::vector<float> forgetGateBiasValue{1.0f, 1.0f, 1.0f, 1.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001865 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001866 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1867 std::vector<float> cellBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001868 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001869 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1870 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001871 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1872 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001873 hidl_vec<uint32_t> projectionWeightsDimensions{0};
1874 std::vector<float> projectionWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001875 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001876 hidl_vec<uint32_t> projectionBiasDimensions{0};
1877 std::vector<float> projectionBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +01001878
1879 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001880 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1881 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001882 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001883 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1884 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001885
Matteo Martincighc7434122018-11-14 12:27:04 +00001886 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +01001887 // 20: The activation function: A value indicating the activation function:
1888 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +00001889 hidl_vec<uint32_t> activationFunctionDimensions{};
1890 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +01001891 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1892 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001893 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1894 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001895 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1896 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001897 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1898 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001899
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001900 // Normalization:
1901 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
1902 // Used to rescale normalized inputs to activation at input gate.
1903 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
1904 std::vector<float> inputLayerNormWeightsValue;
1905 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1906 // Used to rescale normalized inputs to activation at forget gate.
1907 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
1908 std::vector<float> forgetLayerNormWeightsValue;
1909 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1910 // Used to rescale normalized inputs to activation at cell gate.
1911 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1912 std::vector<float> cellLayerNormWeightsValue;
1913 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1914 // Used to rescale normalized inputs to activation at output gate.
1915 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1916 std::vector<float> outputLayerNormWeightsValue;
1917
telsoa01ce3e84a2018-08-31 09:31:35 +01001918 // Outputs:
1919 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1920 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +00001921 // HOWEVER, looking at the code, it appears to be the opposite: (cifg ? 3 : 4) * numUnits
1922 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1923 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1924 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1925 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
1926 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
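    // As in the previous test, but CIFG is used here, so the scratch buffer is only 3 * numUnits wide:
    // with batchSize = 2 and numUnits = 4 that is 2 * 12 = 24 zero-initialized floats.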
telsoa01ce3e84a2018-08-31 09:31:35 +01001927 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001928 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1929 std::vector<float> outputStateOutValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1930 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001931 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001932 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1933 std::vector<float> cellStateOutValue{-0.76044439f, -0.01804161f, 0.18226376f, -0.06493707f,
1934 -0.90477051f, -0.04355603f, 0.18475688f, -0.04158677f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001935 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1936 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +00001937 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1938 std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1939 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001940
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001941 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1942 inputToInputWeightsDimensions, inputToInputWeightsValue,
1943 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1944 inputToCellWeightsDimensions, inputToCellWeightsValue,
1945 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1946 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1947 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1948 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1949 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1950 cellToInputWeightsDimensions, cellToInputWeightsValue,
1951 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1952 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1953 inputGateBiasDimensions, inputGateBiasValue,
1954 forgetGateBiasDimensions, forgetGateBiasValue,
1955 cellBiasDimensions, cellBiasValue,
1956 outputGateBiasDimensions, outputGateBiasValue,
1957 projectionWeightsDimensions, projectionWeightsValue,
1958 projectionBiasDimensions, projectionBiasValue,
1959 outputStateInDimensions, outputStateInValue,
1960 cellStateInDimensions, cellStateInValue,
1961 activationFunctionDimensions, activationFunctionValue,
1962 cellClippingThresholdDimensions, cellClippingThresholdValue,
1963 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1964 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1965 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1966 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1967 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1968 scratchBufferDimensions, scratchBufferValue,
1969 outputStateOutDimensions, outputStateOutValue,
1970 cellStateOutDimensions, cellStateOutValue,
1971 outputDimensions, outputValue,
1972 compute);
Matteo Martincighc7434122018-11-14 12:27:04 +00001973}
Matteo Martincighc7434122018-11-14 12:27:04 +00001974
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001975template <typename HalPolicy>
1976void LstmNoCifgPeepholeProjectionNoClippingLayerNorm(armnn::Compute compute)
Matteo Martincighc7434122018-11-14 12:27:04 +00001977{
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001978 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/layer_norm_lstm.model.cpp
1979 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
1980 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
1981
1982 uint32_t batchSize = 2;
1983 uint32_t inputSize = 5;
1984 uint32_t numUnits = 4;
1985 uint32_t outputSize = 3;
1986
1987 // Inputs:
1988 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1989 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1990 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1991 std::vector<float> inputValue{ 0.7f, 0.8f, 0.1f, 0.2f, 0.3f, // batch 0
1992 0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; // batch 1
1993
1994 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1995 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
1996 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
1997 std::vector<float> inputToInputWeightsValue{ 0.5, 0.6, 0.7, -0.8, -0.9,
1998 0.1, 0.2, 0.3, -0.4, 0.5,
1999 -0.8, 0.7, -0.6, 0.5, -0.4,
2000 -0.5, -0.4, -0.3, -0.2, -0.1};
2001 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2002 // [num_units, input_size].
2003 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
2004 std::vector<float> inputToForgetWeightsValue{-0.6, -0.1, 0.3, 0.2, 0.9,
2005 -0.5, -0.2, -0.4, 0.3, -0.8,
2006 -0.4, 0.3, -0.5, -0.4, -0.6,
2007 0.3, -0.4, -0.6, -0.5, -0.5};
2008 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
2009 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
2010 std::vector<float> inputToCellWeightsValue{-0.4, -0.3, -0.2, -0.1, -0.5,
2011 0.5, -0.2, -0.3, -0.2, -0.6,
2012 0.6, -0.1, -0.4, -0.3, -0.7,
2013 0.7, -0.9, -0.5, 0.8, 0.6};
2014 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2015 // [num_units, input_size].
2016 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
2017 std::vector<float> inputToOutputWeightsValue{-0.8, -0.4, -0.2, -0.9, -0.1,
2018 -0.7, 0.3, -0.3, -0.8, -0.2,
2019 0.6, -0.2, 0.4, -0.7, -0.3,
2020 -0.5, 0.1, 0.5, -0.6, -0.4};
2021 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2022 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
2023 // “num_units”), or the second dimension of the “projection_weights”, if defined.
2024 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
2025 std::vector<float> recurrentToInputWeightsValue{-0.2, -0.3, 0.4,
2026 0.1, -0.5, 0.9,
2027 -0.2, -0.3, -0.7,
2028 0.05, -0.2, -0.6};
2029 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2030 // [num_units, output_size].
2031 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
2032 std::vector<float> recurrentToForgetWeightsValue{-0.5, -0.3, -0.5,
2033 -0.2, 0.6, 0.4,
2034 0.9, 0.3, -0.1,
2035 0.2, 0.5, 0.2};
2036 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2037 // [num_units, output_size].
2038 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
2039 std::vector<float> recurrentToCellWeightsValue{-0.3, 0.2, 0.1,
2040 -0.3, 0.8, -0.08,
2041 -0.2, 0.3, 0.8,
2042 -0.6, -0.1, 0.2};
2043 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2044 // [num_units, output_size].
2045 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
2046 std::vector<float> recurrentToOutputWeightsValue{ 0.3, -0.1, 0.1,
2047 -0.2, -0.5, -0.7,
2048 -0.2, -0.6, -0.1,
2049 -0.4, -0.7, -0.2};
2050 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2051 hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
2052 std::vector<float> cellToInputWeightsValue{0.05, 0.1, 0.25, 0.15};
2053 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2054 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
2055 std::vector<float> cellToForgetWeightsValue{-0.02, -0.15, -0.25, -0.03};
2056 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2057 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
2058 std::vector<float> cellToOutputWeightsValue{0.1, -0.1, -0.5, 0.05};
2059 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2060 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
2061 std::vector<float> inputGateBiasValue{0.03, 0.15, 0.22, 0.38};
2062 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2063 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
2064 std::vector<float> forgetGateBiasValue{0.1, -0.3, -0.2, 0.1};
2065 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2066 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
2067 std::vector<float> cellBiasValue{-0.05, 0.72, 0.25, 0.08};
2068 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2069 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
2070 std::vector<float> outputGateBiasValue{0.05, -0.01, 0.2, 0.1};
2071 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2072 // [output_size, num_units].
2073 hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
2074 std::vector<float> projectionWeightsValue{-0.1, 0.2, 0.01,
2075 -0.2, 0.1, 0.5,
2076 0.3, 0.08, 0.07,
2077 0.2, -0.4, 0.2};
2078 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
2079 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
2080 std::vector<float> projectionBiasValue(outputSize, 0.0f);
2081 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2082 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
2083 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
2084 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2085 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
2086 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
2087
2088 // Constant scalar values (the VTS test adds these as tensors of dim {})
2089 // 20: The activation function: A value indicating the activation function:
2090 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
2091 hidl_vec<uint32_t> activationFunctionDimensions{};
2092 std::vector<int32_t> activationFunctionValue{4};
2093 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
2094 // If set to 0.0 then clipping is disabled.
2095 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
2096 std::vector<float> cellClippingThresholdValue{0.0f};
2097 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
2098 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2099 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
2100 std::vector<float> projectionClippingThresholdValue{0.0f};
2101
2102 // Normalization:
2103 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
2104 // Used to rescale normalized inputs to activation at input gate.
2105 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
2106 std::vector<float> inputLayerNormWeightsValue{0.1, 0.2, 0.3, 0.5};
2107 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
2108 // Used to rescale normalized inputs to activation at forget gate.
2109 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
2110 std::vector<float> forgetLayerNormWeightsValue{0.2, 0.2, 0.4, 0.3};
2111 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
2112 // Used to rescale normalized inputs to activation at cell gate.
2113 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
2114 std::vector<float> cellLayerNormWeightsValue{0.7, 0.2, 0.3, 0.8};
2115 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
2116 // Used to rescale normalized inputs to activation at output gate.
2117 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
2118 std::vector<float> outputLayerNormWeightsValue{0.6, 0.2, 0.2, 0.5};
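    // Interpretation of the referenced layer_norm_lstm model: each gate's pre-activation is
    // layer-normalized and then scaled element-wise by the corresponding [num_units] weight vector
    // above before the gate activation is applied.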
2119
2120 // Outputs:
2121 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
2122 // CIFG, or [batch_size, num_units * 3] without CIFG.
2123 // HOWEVER, looking at the code, it appears to be the opposite: (cifg ? 3 : 4) * numUnits
2124 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
2125 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
2126 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
2127 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
2128 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
2129 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2130 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
2131 std::vector<float> outputStateOutValue { 0.02440767f, 0.12802738f, -0.00170918f,
2132 -0.00692428f, 0.08487406f, 0.06344498f};
2133 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2134 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
2135 std::vector<float> cellStateOutValue {-0.45177122f, 0.37691566f, 0.22542511f, 0.23240635f,
2136 -0.25258583f, 0.33042118f, 0.01730525f, 0.36660123f};
2137 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
2138 // effectively the same as the current “output state (out)” value.
2139 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
2140 std::vector<float> outputValue{ 0.02440767f, 0.12802738f, -0.00170918f,
2141 -0.00692428f, 0.08487406f, 0.06344498f};
2142
2143 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
2144 inputToInputWeightsDimensions, inputToInputWeightsValue,
2145 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
2146 inputToCellWeightsDimensions, inputToCellWeightsValue,
2147 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
2148 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
2149 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
2150 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
2151 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
2152 cellToInputWeightsDimensions, cellToInputWeightsValue,
2153 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
2154 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
2155 inputGateBiasDimensions, inputGateBiasValue,
2156 forgetGateBiasDimensions, forgetGateBiasValue,
2157 cellBiasDimensions, cellBiasValue,
2158 outputGateBiasDimensions, outputGateBiasValue,
2159 projectionWeightsDimensions, projectionWeightsValue,
2160 projectionBiasDimensions, projectionBiasValue,
2161 outputStateInDimensions, outputStateInValue,
2162 cellStateInDimensions, cellStateInValue,
2163 activationFunctionDimensions, activationFunctionValue,
2164 cellClippingThresholdDimensions, cellClippingThresholdValue,
2165 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
2166 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
2167 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
2168 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
2169 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
2170 scratchBufferDimensions, scratchBufferValue,
2171 outputStateOutDimensions, outputStateOutValue,
2172 cellStateOutDimensions, cellStateOutValue,
2173 outputDimensions, outputValue,
2174 compute);
Matteo Martincighc7434122018-11-14 12:27:04 +00002175}
2176
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002177template <typename HalPolicy>
2178void LstmCifgPeepholeProjectionNoClippingLayerNorm(armnn::Compute compute)
Matteo Martincighc7434122018-11-14 12:27:04 +00002179{
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002180 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/layer_norm_lstm.model.cpp
2181 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
2182 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
Matteo Martincighc7434122018-11-14 12:27:04 +00002183
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002184 uint32_t batchSize = 2;
2185 uint32_t inputSize = 5;
2186 uint32_t numUnits = 4;
2187 uint32_t outputSize = 3;
Matteo Martincighc7434122018-11-14 12:27:04 +00002188
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002189 // Inputs:
2190 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
2191 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
2192 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
2193 std::vector<float> inputValue{ 0.7f, 0.8f, 0.1f, 0.2f, 0.3f, // batch 0
2194 0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; // batch 1
telsoa01ce3e84a2018-08-31 09:31:35 +01002195
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002196 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2197 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
2198 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
2199 std::vector<float> inputToInputWeightsValue;
2200 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2201 // [num_units, input_size].
2202 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
2203 std::vector<float> inputToForgetWeightsValue{-0.6, -0.1, 0.3, 0.2, 0.9,
2204 -0.5, -0.2, -0.4, 0.3, -0.8,
2205 -0.4, 0.3, -0.5, -0.4, -0.6,
2206 0.3, -0.4, -0.6, -0.5, -0.5};
2207 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
2208 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
2209 std::vector<float> inputToCellWeightsValue{-0.4, -0.3, -0.2, -0.1, -0.5,
2210 0.5, -0.2, -0.3, -0.2, -0.6,
2211 0.6, -0.1, -0.4, -0.3, -0.7,
2212 0.7, -0.9, -0.5, 0.8, 0.6};
2213 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2214 // [num_units, input_size].
2215 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
2216 std::vector<float> inputToOutputWeightsValue{-0.8, -0.4, -0.2, -0.9, -0.1,
2217 -0.7, 0.3, -0.3, -0.8, -0.2,
2218 0.6, -0.2, 0.4, -0.7, -0.3,
2219 -0.5, 0.1, 0.5, -0.6, -0.4};
2220 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2221 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
2222 // “num_units”), or the second dimension of the “projection_weights”, if defined.
2223 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0};
2224 std::vector<float> recurrentToInputWeightsValue;
2225 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2226 // [num_units, output_size].
2227 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
2228 std::vector<float> recurrentToForgetWeightsValue{-0.5, -0.3, -0.5,
2229 -0.2, 0.6, 0.4,
2230 0.9, 0.3, -0.1,
2231 0.2, 0.5, 0.2};
2232 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2233 // [num_units, output_size].
2234 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
2235 std::vector<float> recurrentToCellWeightsValue{-0.3, 0.2, 0.1,
2236 -0.3, 0.8, -0.08,
2237 -0.2, 0.3, 0.8,
2238 -0.6, -0.1, 0.2};
2239 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2240 // [num_units, output_size].
2241 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
2242 std::vector<float> recurrentToOutputWeightsValue{ 0.3, -0.1, 0.1,
2243 -0.2, -0.5, -0.7,
2244 -0.2, -0.6, -0.1,
2245 -0.4, -0.7, -0.2};
2246 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2247 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
2248 std::vector<float> cellToInputWeightsValue;
2249 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2250 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
2251 std::vector<float> cellToForgetWeightsValue{-0.02, -0.15, -0.25, -0.03};
2252 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2253 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
2254 std::vector<float> cellToOutputWeightsValue{0.1, -0.1, -0.5, 0.05};
2255 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2256 hidl_vec<uint32_t> inputGateBiasDimensions{0};
2257 std::vector<float> inputGateBiasValue;
2258 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2259 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
2260 std::vector<float> forgetGateBiasValue{0.1, -0.3, -0.2, 0.1};
2261 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2262 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
2263 std::vector<float> cellBiasValue{-0.05, 0.72, 0.25, 0.08};
2264 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2265 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
2266 std::vector<float> outputGateBiasValue{0.05, -0.01, 0.2, 0.1};
2267 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2268 // [output_size, num_units].
2269 hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
2270 std::vector<float> projectionWeightsValue{-0.1, 0.2, 0.01,
2271 -0.2, 0.1, 0.5,
2272 0.3, 0.08, 0.07,
2273 0.2, -0.4, 0.2};
2274 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
2275 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
2276 std::vector<float> projectionBiasValue(outputSize, 0.0f);
2277 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2278 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
2279 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
2280 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2281 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
2282 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
2283
2284 // Constant scalar values (the VTS test adds these as tensors of dim {})
2285 // 20: The activation function: A value indicating the activation function:
2286 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
2287 hidl_vec<uint32_t> activationFunctionDimensions{};
2288 std::vector<int32_t> activationFunctionValue{4};
2289 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
2290 // If set to 0.0 then clipping is disabled.
2291 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
2292 std::vector<float> cellClippingThresholdValue{0.0f};
2293 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
2294 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2295 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
2296 std::vector<float> projectionClippingThresholdValue{0.0f};
2297
2298 // Normalization:
2299 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
2300 // Used to rescale normalized inputs to activation at input gate.
2301 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
2302 std::vector<float> inputLayerNormWeightsValue{0.1, 0.2, 0.3, 0.5};
2303 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
2304 // Used to rescale normalized inputs to activation at forget gate.
2305 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
2306 std::vector<float> forgetLayerNormWeightsValue{0.2, 0.2, 0.4, 0.3};
2307 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
2308 // Used to rescale normalized inputs to activation at cell gate.
2309 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
2310 std::vector<float> cellLayerNormWeightsValue{0.7, 0.2, 0.3, 0.8};
2311 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
2312 // Used to rescale normalized inputs to activation at output gate.
2313 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
2314 std::vector<float> outputLayerNormWeightsValue{0.6, 0.2, 0.2, 0.5};
2315
2316 // Outputs:
2317 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
2318 // CIFG, or [batch_size, num_units * 3] without CIFG.
2319 // HOWEVER, looking at the code, it appears to be the opposite: (cifg ? 3 : 4) * numUnits
2320 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
2321 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
2322 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
2323 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
2324 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
2325 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2326 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
2327 std::vector<float> outputStateOutValue { 0.02129706f, 0.14081624f, 0.01127331f,
2328 -0.02263505f, 0.09169482f, 0.07691758f};
2329 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2330 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
2331 std::vector<float> cellStateOutValue{-0.35102980f, 0.42610350f, 0.21463650f, 0.27716520f,
2332 -0.18855170f, 0.32522000f, 0.02036650f, 0.48967660f};
2333 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
2334 // effectively the same as the current “output state (out)” value.
2335 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
2336 std::vector<float> outputValue{ 0.02129706f, 0.14081624f, 0.01127331f,
2337 -0.02263505f, 0.09169482f, 0.07691758f};
2338
2339 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
2340 inputToInputWeightsDimensions, inputToInputWeightsValue,
2341 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
2342 inputToCellWeightsDimensions, inputToCellWeightsValue,
2343 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
2344 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
2345 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
2346 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
2347 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
2348 cellToInputWeightsDimensions, cellToInputWeightsValue,
2349 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
2350 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
2351 inputGateBiasDimensions, inputGateBiasValue,
2352 forgetGateBiasDimensions, forgetGateBiasValue,
2353 cellBiasDimensions, cellBiasValue,
2354 outputGateBiasDimensions, outputGateBiasValue,
2355 projectionWeightsDimensions, projectionWeightsValue,
2356 projectionBiasDimensions, projectionBiasValue,
2357 outputStateInDimensions, outputStateInValue,
2358 cellStateInDimensions, cellStateInValue,
2359 activationFunctionDimensions, activationFunctionValue,
2360 cellClippingThresholdDimensions, cellClippingThresholdValue,
2361 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
2362 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
2363 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
2364 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
2365 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
2366 scratchBufferDimensions, scratchBufferValue,
2367 outputStateOutDimensions, outputStateOutValue,
2368 cellStateOutDimensions, cellStateOutValue,
2369 outputDimensions, outputValue,
2370 compute);
2371}
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01002372
2373template <typename HalPolicy>
2374void QuantizedLstm(armnn::Compute compute)
2375{
Jan Eilers0b7a4192020-03-09 18:20:42 +00002376 armnn::IgnoreUnused(compute);
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01002377 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/quantized_lstm.model.cpp
2378 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/quantized_lstm.example.cpp
2379 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
2380
2381 uint32_t batchSize = 2;
2382 uint32_t inputSize = 2;
2383 uint32_t outputSize = 4;
2384
2385 // Inputs:
2386 // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
2387 // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of [-1, 127/128].
2388 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
2389 std::vector<uint8_t> inputValue{166, 179, 50, 150};
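    // Worked example, for reference only: a fixed range of [-1, 127/128] over 8-bit values implies a
    // scale of 1/128 and a zero point of 128 (inferred from the stated range, not read from this file),
    // so e.g. the quantized input 166 corresponds to (166 - 128) / 128 = 0.296875.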

    // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{outputSize, inputSize};
    std::vector<uint8_t> inputToInputWeightsValue{146, 250, 235, 171, 10, 218, 171, 108};
    // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{outputSize, inputSize};
    std::vector<uint8_t> inputToForgetWeightsValue{24, 50, 132, 179, 158, 110, 3, 169};
    // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> inputToCellWeightsDimensions{outputSize, inputSize};
    std::vector<uint8_t> inputToCellWeightsValue{133, 34, 29, 49, 206, 109, 54, 183};
    // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{outputSize, inputSize};
    std::vector<uint8_t> inputToOutputWeightsValue{195, 187, 11, 99, 109, 10, 218, 48};
    // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{outputSize, outputSize};
    std::vector<uint8_t> recurrentToInputWeightsValue{254, 206, 77, 168, 71, 20, 215, 6,
                                                      223, 7, 118, 225, 59, 130, 174, 26};
    // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{outputSize, outputSize};
    std::vector<uint8_t> recurrentToForgetWeightsValue{137, 240, 103, 52, 68, 51, 237, 112,
                                                       0, 220, 89, 23, 69, 4, 207, 253};
    // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{outputSize, outputSize};
    std::vector<uint8_t> recurrentToCellWeightsValue{172, 60, 205, 65, 14, 0, 140, 168,
                                                     240, 223, 133, 56, 142, 64, 246, 216};
    // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{outputSize, outputSize};
    std::vector<uint8_t> recurrentToOutputWeightsValue{106, 214, 67, 23, 59, 158, 45, 3,
                                                       119, 132, 49, 205, 129, 218, 11, 98};
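    // Note (reference only): as the operand comments above state, operands 1-8 must share a single scale
    // and zero point, and a raw weight q then dequantizes as (q - zeroPoint) * scale. The concrete scale
    // and zero point are presumably supplied inside QuantizedLstmTestImpl, so no values are assumed here.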
    // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
    //    bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //    of input and weights scales and zeroPoint equal to 0.
    hidl_vec<uint32_t> inputGateBiasDimensions{outputSize};
    std::vector<int32_t> inputGateBiasValue{-7876, 13488, -726, 32839};
    // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    hidl_vec<uint32_t> forgetGateBiasDimensions{outputSize};
    std::vector<int32_t> forgetGateBiasValue{9206, -46884, -11693, -38724};
    // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
    //     for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
    //     and weights scales and zeroPoint equal to 0.
    hidl_vec<uint32_t> cellBiasDimensions{outputSize};
    std::vector<int32_t> cellBiasValue{39481, 48624, 48976, -21419};
    // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    hidl_vec<uint32_t> outputGateBiasDimensions{outputSize};
    std::vector<int32_t> outputGateBiasValue{-58999, -17050, -41852, -40538};
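    // Reference-only sketch (assumption; not used by the test): per the operand comments above, each
    // int32 bias value b is quantized with zeroPoint 0 and scale = inputScale * weightsScale, i.e. it
    // represents b * (1/128) * weightsScale given the fixed input range noted earlier.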

    // 13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
    //     [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
    //     It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
    hidl_vec<uint32_t> previousCellStateInDimensions{batchSize, outputSize};
    std::vector<int16_t> previousCellStateInValue{876, 1034, 955, -909, 761, 1029, 796, -1036};
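    // Reference-only sketch (assumption; not used by the test): the QUANT16_SYMM range
    // [-2^4, 2^4 * 32767/32768] implies scale = 16/32768 = 1/2048 and zeroPoint = 0, so the first
    // cell state value 876 represents 876 / 2048 = 0.427734375.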
    // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //     [numBatches, outputSize] specifying the output of the LSTM cell from previous time-step. Tensor
    //     is quantized with a fixed quantization range of -1, 127/128.
    hidl_vec<uint32_t> previousOutputInDimensions{batchSize, outputSize};
    std::vector<uint8_t> previousOutputInValue{136, 150, 140, 115, 135, 152, 138, 112};

    // Outputs:
    // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
    //    which contains a cell state from the current time step. Tensor is quantized using a quantization range
    //    of -2^4, 2^4 * 32767/32768.
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, outputSize};
    std::vector<int16_t> cellStateOutValue {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235};
    // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
    //    contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<uint8_t> outputValue {140, 151, 146, 112, 136, 156, 142, 112};
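    // Reference-only sketch (assumption; not used by the test): the output shares the fixed range
    // [-1, 127/128] (scale 1/128, zeroPoint 128), so the first expected output value 140 corresponds
    // to (140 - 128) / 128 = 0.09375.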


    QuantizedLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
                                     inputToInputWeightsDimensions, inputToInputWeightsValue,
                                     inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                                     inputToCellWeightsDimensions, inputToCellWeightsValue,
                                     inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                                     recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                                     recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                                     recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                                     recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                                     inputGateBiasDimensions, inputGateBiasValue,
                                     forgetGateBiasDimensions, forgetGateBiasValue,
                                     cellBiasDimensions, cellBiasValue,
                                     outputGateBiasDimensions, outputGateBiasValue,
                                     previousOutputInDimensions, previousOutputInValue,
                                     previousCellStateInDimensions, previousCellStateInValue,
                                     cellStateOutDimensions, cellStateOutValue,
                                     outputDimensions, outputValue);
}
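
// Hedged usage sketch (assumption, not part of the original file): helpers such as QuantizedLstm<HalPolicy>
// are normally driven from BOOST_AUTO_TEST_CASEs elsewhere in the driver test suite, roughly like the
// following, for builds where the 1.2 HAL policy is available:
//
//     BOOST_AUTO_TEST_CASE(QuantizedLstmTest)
//     {
//         QuantizedLstm<armnn_driver::hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
//     }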