blob: 2cb3c264c0a8c2ef6fc1268d24a116949640ef2a [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
Matteo Martincighc7434122018-11-14 12:27:04 +00005
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01006#pragma once
7
#include "DriverTestHelpers.hpp"

#include <armnn/utility/IgnoreUnused.hpp>

#include <boost/math/special_functions/relative_difference.hpp>

#include <array>
#include <cmath>
15
Aron Virginas-Tar44cfd842019-06-14 15:45:03 +010016using ArmnnDriver = armnn_driver::ArmnnDriver;
telsoa01ce3e84a2018-08-31 09:31:35 +010017using DriverOptions = armnn_driver::DriverOptions;
Sadik Armagan188675f2021-02-12 17:16:42 +000018using RequestArgument = V1_0::RequestArgument;
19
20#ifdef ARMNN_ANDROID_S
21#include <nnapi/Types.h>
22#endif
Aron Virginas-Tar44cfd842019-06-14 15:45:03 +010023
telsoa01ce3e84a2018-08-31 09:31:35 +010024using namespace driverTestHelpers;
25using namespace android::hardware;
26
27namespace
28{
29
30template<typename T>
Matteo Martincighc7434122018-11-14 12:27:04 +000031RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
telsoa01ce3e84a2018-08-31 09:31:35 +010032{
Sadik Armagan188675f2021-02-12 17:16:42 +000033 V1_0::DataLocation inputInloc = {};
telsoa01ce3e84a2018-08-31 09:31:35 +010034 inputInloc.poolIndex = poolIndex;
35 inputInloc.offset = 0;
36 inputInloc.length = value.size() * sizeof(T);
37 RequestArgument inputRequestArgument = {};
38 inputRequestArgument.location = inputInloc;
39 inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
40 return inputRequestArgument;
41}
42
43// Returns true if the relative difference between two float values is less than the tolerance value given.
44// This is used because the floating point comparison tolerance (set on each BOOST_AUTO_TEST_CASE) does not work!
45bool TolerantCompareEqual(float a, float b, float tolerance = 0.00001f)
46{
47 float rd;
48 if (a == 0.0f)
49 {
50 rd = fabs(b);
51 }
52 else if (b == 0.0f)
53 {
54 rd = fabs(a);
55 }
56 else
57 {
58 rd = boost::math::relative_difference(a, b);
59 }
60 return rd < tolerance;
61}
62
Kevin Mayf29a2c52019-03-14 11:56:32 +000063// Helper function to create an OperandLifeTime::NO_VALUE for testing.
64// To be used on optional input operands that have no values - these are valid and should be tested.
Kevin Mayec1e5b82020-02-26 17:00:39 +000065V1_0::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
Kevin Mayf29a2c52019-03-14 11:56:32 +000066{
67 // Only create a NO_VALUE for optional operands that have no elements
68 if (dimensions.size() == 0 || dimensions[0] == 0)
69 {
Kevin Mayec1e5b82020-02-26 17:00:39 +000070 return V1_0::OperandLifeTime::NO_VALUE;
Kevin Mayf29a2c52019-03-14 11:56:32 +000071 }
Kevin Mayec1e5b82020-02-26 17:00:39 +000072 return V1_0::OperandLifeTime::CONSTANT_COPY;
Kevin Mayf29a2c52019-03-14 11:56:32 +000073}
Ferran Balaguerb2397fd2019-07-25 12:12:39 +010074
75template<typename HalModel>
Kevin Mayec1e5b82020-02-26 17:00:39 +000076void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, const V1_0::Request& request)
Ferran Balaguerb2397fd2019-07-25 12:12:39 +010077{
78 android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, driver);
79 if (preparedModel.get() != nullptr)
80 {
81 Execute(preparedModel, request);
82 }
83}
84
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Specialisation for HAL 1.2 models: these must go through the 1.2
// preparation path so that a V1_2::IPreparedModel is obtained.
template<>
void ExecuteModel<armnn_driver::hal_1_2::HalPolicy::Model>(const armnn_driver::hal_1_2::HalPolicy::Model& model,
                                                           armnn_driver::ArmnnDriver& driver,
                                                           const V1_0::Request& request)
{
    android::sp<V1_2::IPreparedModel> preparedModel = PrepareModel_1_2(model, driver);
    if (preparedModel.get() == nullptr)
    {
        return; // preparation failed; nothing to execute
    }
    Execute(preparedModel, request);
}

#endif
100
Matteo Martincighc7434122018-11-14 12:27:04 +0000101} // anonymous namespace
telsoa01ce3e84a2018-08-31 09:31:35 +0100102
// Compute devices the LSTM tests iterate over: always the CpuRef reference
// backend, plus GpuAcc when the driver was built with Arm Compute CL support.
#ifndef ARMCOMPUTECL_ENABLED
static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
#else
static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
#endif
telsoa01ce3e84a2018-08-31 09:31:35 +0100108
// Add our own tests here since we fail the lstm tests which Google supplies (because of non-const weights)
//
// Builds a one-operation LSTM model from the supplied operand dimensions and
// values, runs it through the driver on the given compute device, and checks
// the produced outputs against the expected values with a tolerant float
// comparison. Optional operands are passed with empty dimensions/values and
// are added with lifetime NO_VALUE (see CreateNoValueLifeTime). When any of
// the layer-normalization weight vectors is non-empty, the model is built
// with the four extra normalization operands (inputs 23-26), shifting the
// output operand indices accordingly.
//
// NOTE: operands must be added in exactly the order below — the NNAPI LSTM
// operation identifies its inputs/outputs by operand index.
template <typename HalPolicy>
void LstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                  const std::vector<float>& inputValue,
                  const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                  const std::vector<float>& inputToInputWeightsValue,
                  const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                  const std::vector<float>& inputToForgetWeightsValue,
                  const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                  const std::vector<float>& inputToCellWeightsValue,
                  const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                  const std::vector<float>& inputToOutputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                  const std::vector<float>& recurrentToInputWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                  const std::vector<float>& recurrentToForgetWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                  const std::vector<float>& recurrentToCellWeightsValue,
                  const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                  const std::vector<float>& recurrentToOutputWeightsValue,
                  const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
                  const std::vector<float>& cellToInputWeightsValue,
                  const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
                  const std::vector<float>& cellToForgetWeightsValue,
                  const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
                  const std::vector<float>& cellToOutputWeightsValue,
                  const hidl_vec<uint32_t>& inputGateBiasDimensions,
                  const std::vector<float>& inputGateBiasValue,
                  const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                  const std::vector<float>& forgetGateBiasValue,
                  const hidl_vec<uint32_t>& cellBiasDimensions,
                  const std::vector<float>& cellBiasValue,
                  const hidl_vec<uint32_t>& outputGateBiasDimensions,
                  const std::vector<float>& outputGateBiasValue,
                  const hidl_vec<uint32_t>& projectionWeightsDimensions,
                  const std::vector<float>& projectionWeightsValue,
                  const hidl_vec<uint32_t>& projectionBiasDimensions,
                  const std::vector<float>& projectionBiasValue,
                  const hidl_vec<uint32_t>& outputStateInDimensions,
                  const std::vector<float>& outputStateInValue,
                  const hidl_vec<uint32_t>& cellStateInDimensions,
                  const std::vector<float>& cellStateInValue,
                  const hidl_vec<uint32_t>& activationFunctionDimensions,
                  const std::vector<int32_t>& activationFunctionValue,
                  const hidl_vec<uint32_t>& cellClippingThresholdDimensions,
                  const std::vector<float>& cellClippingThresholdValue,
                  const hidl_vec<uint32_t>& projectionClippingThresholdDimensions,
                  const std::vector<float>& projectionClippingThresholdValue,
                  const hidl_vec<uint32_t>& inputLayerNormWeightsDimensions,
                  const std::vector<float>& inputLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& forgetLayerNormWeightsDimensions,
                  const std::vector<float>& forgetLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& cellLayerNormWeightsDimensions,
                  const std::vector<float>& cellLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& outputLayerNormWeightsDimensions,
                  const std::vector<float>& outputLayerNormWeightsValue,
                  const hidl_vec<uint32_t>& scratchBufferDimensions,
                  const std::vector<float>& scratchBufferValue,
                  const hidl_vec<uint32_t>& outputStateOutDimensions,
                  const std::vector<float>& outputStateOutValue,
                  const hidl_vec<uint32_t>& cellStateOutDimensions,
                  const std::vector<float>& cellStateOutValue,
                  const hidl_vec<uint32_t>& outputDimensions,
                  const std::vector<float>& outputValue,
                  armnn::Compute compute)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
    using Model = typename HalPolicy::Model;
    Model model = {};

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    AddInputOperand<HalPolicy>(model, inputDimensions);

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    AddTensorOperand<HalPolicy>(model,
                                inputToInputWeightsDimensions,
                                inputToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputToInputWeightsDimensions));
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToInputWeightsDimensions,
                                recurrentToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToInputWeightsDimensions,
                                cellToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToInputWeightsDimensions));
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToForgetWeightsDimensions,
                                cellToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToForgetWeightsDimensions));
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToOutputWeightsDimensions,
                                cellToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToOutputWeightsDimensions));
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                inputGateBiasDimensions,
                                inputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputGateBiasDimensions));
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, forgetGateBiasDimensions, forgetGateBiasValue);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, cellBiasDimensions, cellBiasValue);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, outputGateBiasDimensions, outputGateBiasValue);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    AddTensorOperand<HalPolicy>(model,
                                projectionWeightsDimensions,
                                projectionWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionWeightsDimensions));
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    AddTensorOperand<HalPolicy>(model,
                                projectionBiasDimensions,
                                projectionBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionBiasDimensions));

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddInputOperand<HalPolicy>(model, outputStateInDimensions);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddInputOperand<HalPolicy>(model, cellStateInDimensions);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    AddTensorOperand<HalPolicy>(model,
                                activationFunctionDimensions,
                                activationFunctionValue,
                                HalPolicy::OperandType::INT32);
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                cellClippingThresholdDimensions,
                                cellClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                projectionClippingThresholdDimensions,
                                projectionClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);

    bool normalizationEnabled = false;

    // If any of the tensors have a value all normalization tensors are set
    if (!inputLayerNormWeightsValue.empty() ||
        !forgetLayerNormWeightsValue.empty() ||
        !cellLayerNormWeightsValue.empty() ||
        !outputLayerNormWeightsValue.empty())
    {
        // Normalization:
        // 23:The input layer normalization weights. A 1-D tensor of shape [num_units].
        //    Used to rescale normalized inputs to activation at input gate.
        AddTensorOperand<HalPolicy>(model,
                                    inputLayerNormWeightsDimensions,
                                    inputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(inputLayerNormWeightsDimensions));
        // 24:The forget layer normalization weights. A 1-D tensor of shape [num_units].
        //    Used to rescale normalized inputs to activation at forget gate.
        AddTensorOperand<HalPolicy>(model,
                                    forgetLayerNormWeightsDimensions,
                                    forgetLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(forgetLayerNormWeightsDimensions));
        // 25:The cell layer normalization weights. A 1-D tensor of shape [num_units].
        //    Used to rescale normalized inputs to activation at cell gate.
        AddTensorOperand<HalPolicy>(model,
                                    cellLayerNormWeightsDimensions,
                                    cellLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(cellLayerNormWeightsDimensions));
        // 26:The output layer normalization weights. A 1-D tensor of shape [num_units].
        //    Used to rescale normalized inputs to activation at output gate.
        AddTensorOperand<HalPolicy>(model,
                                    outputLayerNormWeightsDimensions,
                                    outputLayerNormWeightsValue,
                                    HalPolicy::OperandType::TENSOR_FLOAT32,
                                    CreateNoValueLifeTime(outputLayerNormWeightsDimensions));

        normalizationEnabled = true;
    }

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    AddOutputOperand<HalPolicy>(model, scratchBufferDimensions);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddOutputOperand<HalPolicy>(model, outputStateOutDimensions);
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddOutputOperand<HalPolicy>(model, cellStateOutDimensions);
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    AddOutputOperand<HalPolicy>(model, outputDimensions);

    // make the lstm operation
    model.operations.resize(1);
    model.operations[0].type = HalPolicy::OperationType::LSTM;

    // Operand index lists differ depending on whether the 4 normalization
    // operands (23-26) were added above.
    if (normalizationEnabled)
    {
        model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
                                                          14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26};
        model.operations[0].outputs = hidl_vec<uint32_t> {27, 28, 29, 30};
    }
    else
    {
        model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                                                          12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22};
        model.operations[0].outputs = hidl_vec<uint32_t> {23, 24, 25, 26};
    }

    // define the input values
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    inputArguments[0] = CreateRequestArgument<float>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<float>(outputStateInValue, 1);
    inputArguments[2] = CreateRequestArgument<float>(cellStateInValue, 2);

    // define the expected output values
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(4);

    outputArguments[0] = CreateRequestArgument<float>(scratchBufferValue, 3);
    outputArguments[1] = CreateRequestArgument<float>(outputStateOutValue, 4);
    outputArguments[2] = CreateRequestArgument<float>(cellStateOutValue, 5);
    outputArguments[3] = CreateRequestArgument<float>(outputValue, 6);

    V1_0::Request request = {};
    request.inputs = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(outputStateInValue.size(), request, outputStateInValue.data());
    AddPoolAndSetData(cellStateInValue.size(), request, cellStateInValue.data());

    // add memory for the outputs
    // Note: the scratch buffer pool must exist for the request to be valid,
    // but its returned memory is deliberately discarded — its contents are
    // not validated below.
    AddPoolAndGetData<float>(scratchBufferValue.size(), request);
    android::sp<IMemory> outputStateOutMemory = AddPoolAndGetData<float>(outputStateOutValue.size(), request);
    float* outputStateOutData = static_cast<float*>(static_cast<void*>(outputStateOutMemory->getPointer()));
    android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData<float>(cellStateOutValue.size(), request);
    float* cellStateOutData = static_cast<float*>(static_cast<void*>(cellStateOutMemory->getPointer()));
    android::sp<IMemory> outputMemory = AddPoolAndGetData<float>(outputValue.size(), request);
    float* outputData = static_cast<float*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    ExecuteModel(model, *driver, request);

    // check the results
    for (size_t i = 0; i < outputStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
                   "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
    }
    for (size_t i = 0; i < cellStateOutValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]),
                   "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
    }
    for (size_t i = 0; i < outputValue.size(); ++i)
    {
        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
    }
}
411
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100412template <typename HalPolicy>
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +0100413void QuantizedLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
414 const std::vector<uint8_t>& inputValue,
415 const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
416 const std::vector<uint8_t>& inputToInputWeightsValue,
417 const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
418 const std::vector<uint8_t>& inputToForgetWeightsValue,
419 const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
420 const std::vector<uint8_t>& inputToCellWeightsValue,
421 const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
422 const std::vector<uint8_t>& inputToOutputWeightsValue,
423 const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
424 const std::vector<uint8_t>& recurrentToInputWeightsValue,
425 const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
426 const std::vector<uint8_t>& recurrentToForgetWeightsValue,
427 const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
428 const std::vector<uint8_t>& recurrentToCellWeightsValue,
429 const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
430 const std::vector<uint8_t>& recurrentToOutputWeightsValue,
431 const hidl_vec<uint32_t>& inputGateBiasDimensions,
432 const std::vector<int32_t>& inputGateBiasValue,
433 const hidl_vec<uint32_t>& forgetGateBiasDimensions,
434 const std::vector<int32_t>& forgetGateBiasValue,
435 const hidl_vec<uint32_t>& cellBiasDimensions,
436 const std::vector<int32_t>& cellBiasValue,
437 const hidl_vec<uint32_t>& outputGateBiasDimensions,
438 const std::vector<int32_t>& outputGateBiasValue,
439 const hidl_vec<uint32_t>& previousOutputInDimensions,
440 const std::vector<uint8_t>& previousOutputInValue,
441 const hidl_vec<uint32_t>& previousCellStateInDimensions,
442 const std::vector<int16_t>& previousCellStateInValue,
443 const hidl_vec<uint32_t>& cellStateOutDimensions,
444 const std::vector<int16_t>& cellStateOutValue,
445 const hidl_vec<uint32_t>& outputDimensions,
446 const std::vector<uint8_t>& outputValue)
447{
448 auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::GpuAcc));
449 using Model = typename HalPolicy::Model;
450 Model model = {};
451
452 float inputOutputScale = 0.0078125f;
453 int32_t inputOutputOffset = 128;
454
455 float cellStateScale = 0.00048828125f;
456 int32_t cellStateOffset = 0;
457
458 float weightsScale = 0.00408021f;
459 int32_t weightsOffset = 100;
460
461 float biasScale = 3.1876640625e-05f;
462 int32_t biasOffset = 0;
463
464 // Inputs:
465 // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
466 // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
467 AddInputOperand<HalPolicy>(model,
468 inputDimensions,
469 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
470 inputOutputScale,
471 inputOutputOffset);
472
473 // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
474 // [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
475 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
476 AddTensorOperand<HalPolicy>(model,
477 inputToInputWeightsDimensions,
478 inputToInputWeightsValue,
479 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
480 CreateNoValueLifeTime(inputToInputWeightsDimensions),
481 weightsScale,
482 weightsOffset);
483 // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
484 // [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
485 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
486 AddTensorOperand<HalPolicy>(model,
487 inputToForgetWeightsDimensions,
488 inputToForgetWeightsValue,
489 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
490 CreateNoValueLifeTime(inputToForgetWeightsDimensions),
491 weightsScale,
492 weightsOffset);
493 // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
494 // [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
495 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
496 AddTensorOperand<HalPolicy>(model,
497 inputToCellWeightsDimensions,
498 inputToCellWeightsValue,
499 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
500 CreateNoValueLifeTime(inputToCellWeightsDimensions),
501 weightsScale,
502 weightsOffset);
503 // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
504 // [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
505 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
506 AddTensorOperand<HalPolicy>(model,
507 inputToOutputWeightsDimensions,
508 inputToOutputWeightsValue,
509 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
510 CreateNoValueLifeTime(inputToOutputWeightsDimensions),
511 weightsScale,
512 weightsOffset);
513 // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
514 // [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
515 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
516 AddTensorOperand<HalPolicy>(model,
517 recurrentToInputWeightsDimensions,
518 recurrentToInputWeightsValue,
519 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
520 CreateNoValueLifeTime(recurrentToInputWeightsDimensions),
521 weightsScale,
522 weightsOffset);
523 // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
524 // [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
525 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
526 AddTensorOperand<HalPolicy>(model,
527 recurrentToForgetWeightsDimensions,
528 recurrentToForgetWeightsValue,
529 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
530 CreateNoValueLifeTime(recurrentToForgetWeightsDimensions),
531 weightsScale,
532 weightsOffset);
533 // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
534 // [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
535 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
536 AddTensorOperand<HalPolicy>(model,
537 recurrentToCellWeightsDimensions,
538 recurrentToCellWeightsValue,
539 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
540 CreateNoValueLifeTime(recurrentToCellWeightsDimensions),
541 weightsScale,
542 weightsOffset);
543 // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
544 // [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
545 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
546 AddTensorOperand<HalPolicy>(model,
547 recurrentToOutputWeightsDimensions,
548 recurrentToOutputWeightsValue,
549 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
550 CreateNoValueLifeTime(recurrentToOutputWeightsDimensions),
551 weightsScale,
552 weightsOffset);
553 // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
554 // bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
555 // of input and weights scales and zeroPoint equal to 0.
556 AddTensorOperand<HalPolicy>(model,
557 inputGateBiasDimensions,
558 inputGateBiasValue,
559 HalPolicy::OperandType::TENSOR_INT32,
560 CreateNoValueLifeTime(inputGateBiasDimensions),
561 biasScale,
562 biasOffset);
563 // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
564 // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
565 // of input and weights scales and zeroPoint equal to 0.
566 AddTensorOperand<HalPolicy>(model,
567 forgetGateBiasDimensions,
568 forgetGateBiasValue,
569 HalPolicy::OperandType::TENSOR_INT32,
570 CreateNoValueLifeTime(forgetGateBiasDimensions),
571 biasScale,
572 biasOffset);
573 // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
574 // for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
575 // and weights scales and zeroPoint equal to 0.
576 AddTensorOperand<HalPolicy>(model,
577 cellBiasDimensions,
578 cellBiasValue,
579 HalPolicy::OperandType::TENSOR_INT32,
580 CreateNoValueLifeTime(cellBiasDimensions),
581 biasScale,
582 biasOffset);
583 // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
584 // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
585 // of input and weights scales and zeroPoint equal to 0.
586 AddTensorOperand<HalPolicy>(model,
587 outputGateBiasDimensions,
588 outputGateBiasValue,
589 HalPolicy::OperandType::TENSOR_INT32,
590 CreateNoValueLifeTime(outputGateBiasDimensions),
591 biasScale,
592 biasOffset);
593
594 //13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
595 // [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
596 // It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
597 AddInputOperand<HalPolicy>(model,
598 previousCellStateInDimensions,
599 HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
600 cellStateScale,
601 cellStateOffset);
602 // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
 603 // [numBatches, outputSize] specifying the output of the LSTM cell from previous time-step. Tensor
604 // is quantized with a fixed quantization range of -1, 127/128.
605 AddInputOperand<HalPolicy>(model,
606 previousOutputInDimensions,
607 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
608 inputOutputScale,
609 inputOutputOffset);
610
611 // Outputs:
612 // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
613 // which contains a cell state from the current time step. Tensor is quantized using a quantization range
614 // of -2^4, 2^4 * 32767/32768.
615 AddOutputOperand<HalPolicy>(model,
616 cellStateOutDimensions,
617 HalPolicy::OperandType::TENSOR_QUANT16_SYMM,
618 cellStateScale,
619 cellStateOffset);
620 // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
621 // contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
622 AddOutputOperand<HalPolicy>(model,
623 outputDimensions,
624 HalPolicy::OperandType::TENSOR_QUANT8_ASYMM,
625 inputOutputScale,
626 inputOutputOffset);
627
628 // make the lstm operation
629 model.operations.resize(1);
630 model.operations[0].type = HalPolicy::OperationType::QUANTIZED_16BIT_LSTM;
631
632 model.operations[0].inputs = hidl_vec<uint32_t> { 0, 1, 2, 3, 4, 5, 6, 7,
633 8, 9, 10, 11, 12, 13, 14};
634 model.operations[0].outputs = hidl_vec<uint32_t> {15, 16};
635
636 // define the input values
637 hidl_vec<RequestArgument> inputArguments;
638 inputArguments.resize(3);
639
640 inputArguments[0] = CreateRequestArgument<uint8_t>(inputValue, 0);
641 inputArguments[1] = CreateRequestArgument<int16_t>(previousCellStateInValue, 1);
642 inputArguments[2] = CreateRequestArgument<uint8_t>(previousOutputInValue, 2);
643
644 // define the expected output values
645 hidl_vec<RequestArgument> outputArguments;
646 outputArguments.resize(2);
647
648 outputArguments[0] = CreateRequestArgument<int16_t>(cellStateOutValue, 3);
649 outputArguments[1] = CreateRequestArgument<uint8_t>(outputValue, 4);
650
Kevin Mayec1e5b82020-02-26 17:00:39 +0000651 V1_0::Request request = {};
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +0100652 request.inputs = inputArguments;
653 request.outputs = outputArguments;
654
655 // set the input data
656 AddPoolAndSetData(inputValue.size(), request, inputValue.data());
657 AddPoolAndSetData(previousCellStateInValue.size(), request, previousCellStateInValue.data());
658 AddPoolAndSetData(previousOutputInValue.size(), request, previousOutputInValue.data());
659
660 // add memory for the outputs
661 android::sp<IMemory> cellStateOutMemory = AddPoolAndGetData<int16_t>(cellStateOutValue.size(), request);
662 int16_t* cellStateOutData = static_cast<int16_t*>(static_cast<void*>(cellStateOutMemory->getPointer()));
663 android::sp<IMemory> outputMemory = AddPoolAndGetData<uint8_t>(outputValue.size(), request);
664 uint8_t* outputData = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
665
666 // make the prepared model and run the execution
667 ExecuteModel(model, *driver, request);
668
669 // check the results
670 for (size_t i = 0; i < cellStateOutValue.size(); ++i)
671 {
672 BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i], 1.0f),
673 "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
674 }
675 for (size_t i = 0; i < outputValue.size(); ++i)
676 {
677 BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i], 1.0f),
678 "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
679 }
680}
681
// Builds a float32 LSTM model with the input gate present (i.e. no CIFG), no peephole
// connections and no projection layer, runs it through the driver on the given backend,
// and checks the produced outputs against the pre-computed reference values below.
// 'compute' selects the Arm NN backend the model is executed on.
template <typename HalPolicy>
void LstmNoCifgNoPeepholeNoProjection(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).

    uint32_t batchSize = 1;
    uint32_t inputSize = 2;
    uint32_t numUnits = 4;
    // No projection layer in this test, so the output size equals the number of cell units.
    uint32_t outputSize = numUnits;

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{2.0f, 3.0f};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToInputWeightsValue{-0.45018822f, -0.02338299f,
                                                -0.08705890f, -0.34550029f,
                                                 0.04266912f, -0.15680569f,
                                                -0.34856534f,  0.43890524f};
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{ 0.09701663f,  0.20334584f,
                                                 -0.50592935f, -0.31343272f,
                                                 -0.40032279f,  0.44781327f,
                                                  0.01387155f, -0.35593212f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.50013041f,  0.13702840f,
                                                0.11810488f,  0.20131630f,
                                               -0.20583314f,  0.44344562f,
                                                0.22077113f, -0.29909778f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{-0.25065863f, -0.28290087f,
                                                  0.04613829f,  0.40525138f,
                                                  0.44272184f,  0.03897077f,
                                                 -0.15568960f,  0.19487578f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToInputWeightsValue{-0.00635350f, -0.20423880f,  0.31454784f, -0.35746509f,
                                                     0.28902304f,  0.08183324f, -0.16555229f,  0.02286911f,
                                                    -0.13566875f,  0.03034258f,  0.48091322f, -0.12528998f,
                                                     0.24077177f, -0.51332325f, -0.33502164f,  0.10629296f};
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.48684245f, -0.06655136f,  0.42224967f,  0.21126390f,
                                                      0.27654213f,  0.20864892f, -0.07646349f,  0.45877004f,
                                                      0.00141793f, -0.14609534f,  0.36447752f,  0.09196436f,
                                                      0.28053468f,  0.01560611f, -0.20127171f, -0.01140004f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{-0.34074140f,  0.24443203f, -0.20785320f,  0.26320225f,
                                                    0.05695659f, -0.00123841f, -0.47447860f, -0.35869038f,
                                                   -0.06418842f, -0.13502428f, -0.50176400f,  0.22830659f,
                                                   -0.46367589f,  0.26016325f, -0.03894562f, -0.16368064f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{ 0.43385774f, -0.17194885f,  0.27182370f,  0.09215671f,
                                                      0.24107647f, -0.39835793f,  0.18212086f,  0.01301402f,
                                                      0.48572797f, -0.50656658f,  0.20047462f, -0.20607421f,
                                                     -0.51818722f, -0.15390486f,  0.04681480f,  0.39922136f};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<float> cellToForgetWeightsValue;
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<float> cellToOutputWeightsValue;
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
    std::vector<float> inputGateBiasValue(numUnits, 0.0f);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue(numUnits, 0.0f);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue(numUnits, 0.0f);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    hidl_vec<uint32_t> projectionWeightsDimensions{0};
    std::vector<float> projectionWeightsValue;
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<float> projectionBiasValue;

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4};
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f};

    // Normalization:
    // 23:The input layer normalization weights. A 1-D tensor of shape [num_units].
    //    Used to rescale normalized inputs to activation at input gate.
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
    std::vector<float> inputLayerNormWeightsValue;
    // 24:The forget layer normalization weights. A 1-D tensor of shape [num_units].
    //    Used to rescale normalized inputs to activation at forget gate.
    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
    std::vector<float> forgetLayerNormWeightsValue;
    // 25:The cell layer normalization weights. A 1-D tensor of shape [num_units].
    //    Used to rescale normalized inputs to activation at cell gate.
    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
    std::vector<float> cellLayerNormWeightsValue;
    // 26:The output layer normalization weights. A 1-D tensor of shape [num_units].
    //    Used to rescale normalized inputs to activation at output gate.
    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
    std::vector<float> outputLayerNormWeightsValue;

    // Outputs:
    //  0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
    //     with CIFG, or [batch_size, num_units * 3] without CIFG.
    // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
    // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //           android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //           tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
    std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
    //  1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<float> outputStateOutValue {-0.0297319f, 0.122947f, 0.208851f, -0.153588f};
    //  2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<float> cellStateOutValue {-0.145439f, 0.157475f, 0.293663f, -0.277353f};
    //  3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<float> outputValue {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f};

    // Build the model from the operands above, execute it through the driver on the requested
    // backend, and verify the resulting output state, cell state and output tensors.
    // NOTE: the pair order below must match the LSTM operand indices documented above.
    LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
                            inputToInputWeightsDimensions, inputToInputWeightsValue,
                            inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                            inputToCellWeightsDimensions, inputToCellWeightsValue,
                            inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                            recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                            recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                            recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                            recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                            cellToInputWeightsDimensions, cellToInputWeightsValue,
                            cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                            cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                            inputGateBiasDimensions, inputGateBiasValue,
                            forgetGateBiasDimensions, forgetGateBiasValue,
                            cellBiasDimensions, cellBiasValue,
                            outputGateBiasDimensions, outputGateBiasValue,
                            projectionWeightsDimensions, projectionWeightsValue,
                            projectionBiasDimensions, projectionBiasValue,
                            outputStateInDimensions, outputStateInValue,
                            cellStateInDimensions, cellStateInValue,
                            activationFunctionDimensions, activationFunctionValue,
                            cellClippingThresholdDimensions, cellClippingThresholdValue,
                            projectionClippingThresholdDimensions, projectionClippingThresholdValue,
                            inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
                            forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
                            cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
                            outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
                            scratchBufferDimensions, scratchBufferValue,
                            outputStateOutDimensions, outputStateOutValue,
                            cellStateOutDimensions, cellStateOutValue,
                            outputDimensions, outputValue,
                            compute);
}
877
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100878template <typename HalPolicy>
Matteo Martincighc7434122018-11-14 12:27:04 +0000879void LstmCifgPeepholeNoProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +0100880{
881 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
882 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
883 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
884
Matteo Martincighc7434122018-11-14 12:27:04 +0000885 uint32_t batchSize = 1;
886 uint32_t inputSize = 2;
887 uint32_t numUnits = 4;
888 uint32_t outputSize = numUnits;
889
telsoa01ce3e84a2018-08-31 09:31:35 +0100890 // Inputs:
891 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
892 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +0000893 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
894 std::vector<float> inputValue{2.0f, 3.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100895
896 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
897 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +0000898 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
899 std::vector<float> inputToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100900 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
901 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000902 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
903 std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
904 0.13056988f, -0.36333650f,
905 -0.22755712f, 0.28253698f,
906 0.24407166f, 0.33826375f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100907 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000908 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
909 std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
910 -0.09624726f, 0.05100781f,
911 0.04717243f, 0.48944736f,
912 -0.38535351f, -0.17212132f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100913 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
914 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000915 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
916 std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
917 -0.55932593f, -0.09426838f,
918 -0.44257352f, 0.54939759f,
919 0.01533556f, 0.42751634f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100920 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
921 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
922 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +0000923 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
924 std::vector<float> recurrentToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100925 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
926 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000927 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
928 std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
929 -0.14340827f, 0.36986142f, 0.23414481f, 0.55899000f,
930 0.10798943f, -0.41174671f, 0.17751795f, -0.34484994f,
931 -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100932 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
933 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000934 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
935 std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
936 0.42957711f, 0.01841056f, -0.32764608f, -0.33027974f,
937 -0.10826075f, 0.20675004f, 0.19069612f, -0.03026325f,
938 -0.54532051f, 0.33003211f, 0.44901288f, 0.21193194f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100939 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
940 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000941 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
942 std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
943 0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
944 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
945 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100946 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000947 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
948 std::vector<float> cellToInputWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100949 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000950 hidl_vec<uint32_t> cellToForgetWeightsDimensions{4};
951 std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100952 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000953 hidl_vec<uint32_t> cellToOutputWeightsDimensions{4};
954 std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100955 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000956 hidl_vec<uint32_t> inputGateBiasDimensions{0}; // VTS was {4} -> {0} ?
957 std::vector<float> inputGateBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100958 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000959 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
960 std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100961 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000962 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
963 std::vector<float> cellBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100964 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000965 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
966 std::vector<float> outputGateBiasValue(numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100967 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
968 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000969 hidl_vec<uint32_t> projectionWeightsDimensions{0};
970 std::vector<float> projectionWeightsValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100971 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000972 hidl_vec<uint32_t> projectionBiasDimensions{0};
973 std::vector<float> projectionBiasValue;
telsoa01ce3e84a2018-08-31 09:31:35 +0100974
975 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +0000976 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
977 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100978 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +0000979 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
980 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +0100981
Matteo Martincighc7434122018-11-14 12:27:04 +0000982 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +0100983 // 20: The activation function: A value indicating the activation function:
984 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +0000985 hidl_vec<uint32_t> activationFunctionDimensions{};
986 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +0100987 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
988 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000989 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
990 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100991 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
992 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +0000993 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
994 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +0100995
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100996 // Normalization:
997 // 23:The input layer normalization weights. A 1-D tensor of shape [num_units].
998 // Used to rescale normalized inputs to activation at input gate.
999 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
1000 std::vector<float> inputLayerNormWeightsValue;
1001 // 24:The forget layer normalization weights. A 1-D tensor of shape [num_units].
1002 // Used to rescale normalized inputs to activation at forget gate.
1003 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
1004 std::vector<float> forgetLayerNormWeightsValue;
1005 // 25:The cell layer normalization weights. A 1-D tensor of shape [num_units].
1006 // Used to rescale normalized inputs to activation at cell gate.
1007 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1008 std::vector<float> cellLayerNormWeightsValue;
1009 // 26:The output layer normalization weights. A 1-D tensor of shape [num_units].
1010 // Used to rescale normalized inputs to activation at output gate.
1011 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1012 std::vector<float> outputLayerNormWeightsValue;
1013
telsoa01ce3e84a2018-08-31 09:31:35 +01001014 // Outputs:
1015 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1016 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +00001017 // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
1018 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1019 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1020 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1021 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
1022 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001023 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001024 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1025 std::vector<float> outputStateOutValue{-0.364445f, -0.00352185f, 0.128866f, -0.0516365f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001026 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001027 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1028 std::vector<float> cellStateOutValue{-0.760444f, -0.0180416f, 0.182264f, -0.0649371f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001029 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1030 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +00001031 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1032 std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001033
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001034 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1035 inputToInputWeightsDimensions, inputToInputWeightsValue,
1036 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1037 inputToCellWeightsDimensions, inputToCellWeightsValue,
1038 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1039 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1040 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1041 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1042 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1043 cellToInputWeightsDimensions, cellToInputWeightsValue,
1044 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1045 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1046 inputGateBiasDimensions, inputGateBiasValue,
1047 forgetGateBiasDimensions, forgetGateBiasValue,
1048 cellBiasDimensions, cellBiasValue,
1049 outputGateBiasDimensions, outputGateBiasValue,
1050 projectionWeightsDimensions, projectionWeightsValue,
1051 projectionBiasDimensions, projectionBiasValue,
1052 outputStateInDimensions, outputStateInValue,
1053 cellStateInDimensions, cellStateInValue,
1054 activationFunctionDimensions, activationFunctionValue,
1055 cellClippingThresholdDimensions, cellClippingThresholdValue,
1056 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1057 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1058 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1059 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1060 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1061 scratchBufferDimensions, scratchBufferValue,
1062 outputStateOutDimensions, outputStateOutValue,
1063 cellStateOutDimensions, cellStateOutValue,
1064 outputDimensions, outputValue,
1065 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +01001066}
1067
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001068template <typename HalPolicy>
Matteo Martincighc7434122018-11-14 12:27:04 +00001069void LstmNoCifgPeepholeProjection(armnn::Compute compute)
telsoa01ce3e84a2018-08-31 09:31:35 +01001070{
1071 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm3.model.cpp
1072 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm3.example.cpp
1073 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
1074
Matteo Martincighc7434122018-11-14 12:27:04 +00001075 uint32_t batchSize = 2;
1076 uint32_t inputSize = 5;
1077 uint32_t numUnits = 20;
1078 uint32_t outputSize = 16;
1079
telsoa01ce3e84a2018-08-31 09:31:35 +01001080 // Inputs:
1081 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1082 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Matteo Martincighc7434122018-11-14 12:27:04 +00001083 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
1084 std::vector<float> inputValue{0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
1085 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001086
1087 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1088 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Matteo Martincighc7434122018-11-14 12:27:04 +00001089 hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
1090 std::vector<float> inputToInputWeightsValue
1091 {
1092 0.0213936830f, 0.0612455100f, 0.0469051670f, -0.0146576770f, -0.0314946300f,
1093 0.0917180300f, 0.1464780100f, 0.1079719300f, -0.0057968358f, 0.0019193048f,
1094 -0.2726754000f, 0.1015402900f, -0.0185398850f, 0.0803498850f, -0.1026238500f,
1095 -0.0225997870f, -0.0912115500f, -0.0086759670f, -0.0452061030f, -0.0821282000f,
1096 -0.0080459520f, 0.0154780810f, 0.0552172470f, 0.0387195870f, 0.0441536270f,
1097 -0.0645324300f, 0.0503182500f, -0.0469351080f, -0.0081644309f, 0.0145742260f,
1098 -0.1671009000f, -0.1551955200f, -0.1681979700f, -0.1397126900f, -0.1195305900f,
1099 0.2500548700f, -0.2279098300f, 0.0098550870f, -0.0281409580f, -0.1120069800f,
1100 0.1129540800f, -0.0035217577f, 0.0544850750f, 0.0518469500f, 0.0647112060f,
1101 0.1098919300f, 0.1167478600f, 0.0349060700f, 0.0772735700f, 0.1139058500f,
1102 -0.1863375000f, -0.1034451000f, -0.1394518900f, -0.0494012270f, -0.1876706300f,
1103 0.0424839030f, 0.1423355200f, 0.1383258100f, 0.1835016500f, 0.1454560300f,
1104 -0.0285457040f, 0.0249395310f, 0.0509297180f, 0.0076203286f, -0.0029723682f,
1105 -0.0424842240f, -0.1182759600f, -0.0917110400f, -0.1080862800f, -0.1632798800f,
1106 -0.2273378000f, -0.0993647000f, -0.0171551070f, 0.0023917493f, 0.0492727640f,
1107 0.0038534778f, 0.0547645050f, 0.0897537840f, 0.0694723400f, 0.0801447600f,
1108 -0.0454423400f, -0.0497073000f, -0.0713563100f, -0.0489291060f, -0.0040420120f,
1109 -0.0092840260f, 0.0180420540f, 0.0036860977f, -0.0742730200f, -0.1143460400f,
1110 -0.0189954560f, 0.0314875430f, 0.0128349080f, 0.0199777540f, 0.0442566540f,
1111 -0.3929261300f, -0.1851933400f, -0.1165128100f, -0.0680989200f, 0.0113736770f
1112 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001113 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1114 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001115 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
1116 std::vector<float> inputToForgetWeightsValue
1117 {
1118 -0.0018401089f, -0.0048522370f, 0.0369842400f, 0.0141817040f, 0.0282732360f,
1119 -0.0167261940f, -0.0524975900f, -0.1020426100f, 0.0086106600f, -0.0409795050f,
1120 -0.0098991870f, 0.0192389200f, -0.0281772690f, -0.0853510300f, -0.1458549500f,
1121 0.1066256700f, -0.0190973100f, -0.0178835340f, -0.0047269356f, -0.0451033230f,
1122 0.0030784295f, 0.0767847750f, 0.0746369600f, 0.0945313950f, 0.0814421000f,
1123 -0.1225789900f, -0.0339457580f, -0.0313034650f, 0.0456306260f, 0.0684388700f,
1124 -0.1349294500f, -0.0124800070f, -0.0811829000f, -0.0722449900f, -0.0962879100f,
1125 0.0451009460f, 0.0012300825f, 0.0139646620f, 0.0993723940f, 0.0254305900f,
1126 0.0695832400f, 0.0342572960f, 0.0482646000f, 0.0626799700f, 0.0526250680f,
1127 0.1278466600f, 0.0707789700f, 0.0257259350f, 0.0416500900f, 0.0724190500f,
1128 0.0186686440f, -0.0373772940f, -0.0627778300f, -0.0883363600f, -0.0401206050f,
1129 -0.0114055860f, -0.0078083350f, -0.0103013860f, -0.0051021670f, 0.0277174640f,
1130 0.0548342300f, 0.1144911100f, 0.1128965200f, 0.1093983900f, 0.1339650600f,
1131 -0.0840216600f, -0.0190146200f, -0.0446783040f, -0.0772056500f, 0.0143500630f,
1132 -0.1175795800f, -0.0652038000f, -0.0818573300f, -0.0767543240f, -0.0926143750f,
1133 0.1040549100f, 0.0529603360f, 0.0357558950f, 0.0358393860f, -0.0125405530f,
1134 0.0368812980f, 0.0291337600f, 0.0342015900f, 0.0544844700f, -0.0545233530f,
1135 0.0258271500f, 0.0232735500f, -0.0118571790f, -0.0011980024f, -0.0346417170f,
1136 -0.0261250940f, -0.1758261500f, -0.1592365700f, -0.2748677400f, -0.0006143371f,
1137 0.0001771948f, -8.470171e-05f, 0.0265180700f, 0.0457907650f, 0.069564960f
1138 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001139 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001140 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
1141 std::vector<float> inputToCellWeightsValue
1142 {
1143 -0.0458028300f, -0.0954946200f, -0.0324189850f, -0.0645463300f, -0.0435284530f,
1144 0.0430185870f, -0.0491523440f, -0.1241814400f, -0.0789854750f, -0.0759688900f,
1145 0.0194843620f, -0.1143496200f, -0.0074034138f, -0.0631484400f, -0.0929814950f,
1146 0.0062155537f, -0.0250343380f, -0.0028890965f, 0.0489295270f, 0.0623507500f,
1147 0.1066591800f, -0.0320367920f, -0.0850591600f, -0.1084335800f, -0.1300243300f,
1148 -0.0368164370f, -0.0213013400f, -0.0165182390f, 0.0047691227f, -0.0025825808f,
1149 0.0660178660f, 0.0299915340f, -0.1065283600f, -0.1037554000f, -0.1305607100f,
1150 -0.0326664300f, -0.0337024140f, -0.0064734240f, -0.0461169200f, 0.0144193390f,
1151 -0.0251743230f, 0.0396852000f, 0.0817775060f, 0.0615746800f, 0.1021009500f,
1152 -0.0096581940f, 0.0465117170f, 0.0360390600f, 0.0069369148f, 0.0159600950f,
1153 -0.0650766600f, 0.0955159800f, 0.0535688360f, 0.0640871400f, 0.1283566700f,
1154 -0.0087143290f, -0.2021196600f, -0.1209367400f, 0.0294504720f, 0.2849013000f,
1155 -0.0292279010f, 0.1164364000f, -0.0856026300f, 0.0994178600f, -0.0369995650f,
1156 -0.0288426260f, -0.0033637602f, -0.0170129020f, -0.0972086500f, -0.1119335100f,
1157 -0.0291551170f, -0.0179360340f, -0.0097689360f, -0.0422332400f, -0.0361596350f,
1158 0.0650511200f, -0.0217428920f, -0.0233772120f, -0.0722136400f, -0.0643055200f,
1159 0.0545386500f, 0.0911498140f, 0.0638733100f, 0.0075183930f, 0.0559609530f,
1160 0.0697793440f, 0.0464111680f, 0.1050991100f, 0.0746389400f, 0.0075130584f,
1161 0.0128509820f, 0.0455543100f, 0.0569556880f, 0.0655528500f, 0.0508014560f,
1162 -0.0098626830f, 0.0082677200f, -0.0265556090f, -0.0073611983f, -0.0014897042f
1163 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001164 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1165 // [num_units, input_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001166 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
1167 std::vector<float> inputToOutputWeightsValue
1168 {
1169 -0.0998932000f, -0.0720195600f, -0.0528037730f, -0.1562959300f, -0.1500191800f,
1170 -0.0765075100f, 0.0235985500f, -0.0751553550f, -0.0803770900f, -0.1509353400f,
1171 0.0295175520f, -0.0475139300f, 0.0103505310f, -0.0266485100f, -0.0168397220f,
1172 -0.0231211630f, 0.0077019283f, 0.0128512570f, -0.0504064900f, -0.0129761000f,
1173 -0.0217377470f, -0.0383057930f, -0.0687058600f, -0.0148124700f, -0.0012853940f,
1174 0.1012423600f, 0.0831228350f, 0.0533130060f, -0.0622356460f, -0.0756371540f,
1175 -0.0278339030f, 0.0297749710f, 0.1130802000f, 0.0921890600f, 0.0950613500f,
1176 -0.0866657640f, -0.0371627060f, -0.0388809140f, -0.0358328450f, -0.0144815640f,
1177 -0.0982500300f, -0.1204856900f, -0.0976655860f, -0.0528763300f, -0.0964047000f,
1178 -0.1136642900f, 0.0357775050f, 0.1356881900f, 0.0524513830f, 0.0506493040f,
1179 0.0579895100f, -0.0218523350f, -0.0998488440f, 0.0147404750f, -0.0788979460f,
1180 0.0497469900f, 0.0141604730f, 0.0697393200f, 0.0496494200f, 0.0333646460f,
1181 0.0819012400f, 0.0255353670f, 0.0508931650f, 0.0485142540f, 0.0694581300f,
1182 -0.0789075640f, -0.0670761600f, -0.1184450800f, -0.0998668800f, -0.0750940300f,
1183 0.0626322600f, 0.1492558700f, 0.2018843600f, 0.1209845100f, 0.1463941500f,
1184 0.0015017595f, -0.0142673820f, -0.0341725700f, 0.0127114680f, 0.0028300495f,
1185 -0.0247584820f, -0.0509854800f, -0.0821182000f, 0.0142256720f, 0.0215441580f,
1186 0.0894972500f, 0.0750526800f, -0.0020780868f, 0.0490825800f, 0.0647629500f,
1187 -0.0229070630f, 0.0275624560f, 0.0401857350f, 0.0195675770f, -0.0155987390f,
1188 -0.0490973030f, -0.0171218660f, -0.0833682340f, -0.0233200200f, -0.084095600f
1189 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001190 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1191 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1192 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Matteo Martincighc7434122018-11-14 12:27:04 +00001193 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
1194 std::vector<float> recurrentToInputWeightsValue
1195 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001196 -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f, // 00
1197 -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
1198 -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
1199 -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001200 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f, // 01
1201 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001202 -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001203 0.14283475f, -0.07390571f, -0.06402044f, 0.062524505f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001204 -0.093129106f, 0.04860203f, -0.08364217f, -0.08119002f, // 02
Matteo Martincighc7434122018-11-14 12:27:04 +00001205 0.009352075f, 0.22920375f, 0.0016303885f, 0.11583097f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001206 -0.13732095f, 0.012405723f, -0.07551853f, 0.06343048f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001207 0.12162708f, -0.031923793f, -0.014335606f, 0.01790974f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001208 -0.10650317f, -0.0724401f, 0.08554849f, -0.05727212f, // 03
Matteo Martincighc7434122018-11-14 12:27:04 +00001209 0.06556731f, -0.042729504f, -0.043227166f, 0.011683251f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001210 -0.013082158f, -0.029302018f, -0.010899579f, -0.062036745f,
1211 -0.022509435f, -0.00964907f, -0.01567329f, 0.04260106f,
1212 -0.07787477f, -0.11576462f, 0.017356863f, 0.048673786f, // 04
1213 -0.017577527f, -0.05527947f, -0.082487635f, -0.040137455f,
1214 -0.10820036f, -0.04666372f, 0.022746278f, -0.07851417f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001215 0.01068115f, 0.032956902f, 0.022433773f, 0.0026891115f,
1216 0.08944216f, -0.0685835f, 0.010513544f, 0.07228705f, // 05
1217 0.02032331f, -0.059686817f, -0.0005566496f, -0.086984694f,
1218 0.040414046f, -0.1380399f, 0.094208956f, -0.05722982f,
1219 0.012092817f, -0.04989123f, -0.086576f, -0.003399834f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001220 -0.04696032f, -0.045747425f, 0.10091314f, 0.048676282f, // 06
1221 -0.029037097f, 0.031399418f, -0.0040285117f, 0.047237843f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001222 0.09504992f, 0.041799378f, -0.049185462f, -0.031518843f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001223 -0.10516937f, 0.026374253f, 0.10058866f, -0.0033195973f,
1224 -0.041975245f, 0.0073591834f, 0.0033782164f, -0.004325073f, // 07
1225 -0.10167381f, 0.042500053f, -0.01447153f, 0.06464186f,
1226 -0.017142897f, 0.03312627f, 0.009205989f, 0.024138335f,
1227 -0.011337001f, 0.035530265f, -0.010912711f, 0.0706555f,
1228 -0.005894094f, 0.051841937f, -0.1401738f, -0.02351249f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +00001229 0.0365468f, 0.07590991f, 0.08838724f, 0.021681072f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001230 -0.10086113f, 0.019608743f, -0.06195883f, 0.077335775f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001231 0.023646897f, -0.095322326f, 0.02233014f, 0.09756986f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001232 -0.048691444f, -0.009579111f, 0.07595467f, 0.11480546f, // 09
1233 -0.09801813f, 0.019894179f, 0.08502348f, 0.004032281f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001234 0.037211012f, 0.068537936f, -0.048005626f, -0.091520436f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001235 -0.028379958f, -0.01556313f, 0.06554592f, -0.045599163f,
1236 -0.01672207f, -0.020169014f, -0.011877351f, -0.20212261f, // 10
Matteo Martincighc7434122018-11-14 12:27:04 +00001237 0.010889619f, 0.0047078193f, 0.038385306f, 0.08540671f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001238 -0.017140968f, -0.0035865551f, 0.016678626f, 0.005633034f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001239 0.015963363f, 0.00871737f, 0.060130805f, 0.028611384f,
1240 0.10109069f, -0.015060172f, -0.07894427f, 0.06401885f, // 11
1241 0.011584063f, -0.024466386f, 0.0047652307f, -0.09041358f,
1242 0.030737216f, -0.0046374933f, 0.14215417f, -0.11823516f,
1243 0.019899689f, 0.006106124f, -0.027092824f, 0.0786356f,
1244 0.05052217f, -0.058925f, -0.011402121f, -0.024987547f, // 12
telsoa01ce3e84a2018-08-31 09:31:35 +01001245 -0.0013661642f, -0.06832946f, -0.015667673f, -0.1083353f,
1246 -0.00096863037f, -0.06988685f, -0.053350925f, -0.027275559f,
1247 -0.033664223f, -0.07978348f, -0.025200296f, -0.017207067f,
1248 -0.058403496f, -0.055697463f, 0.005798788f, 0.12965427f, // 13
1249 -0.062582195f, 0.0013350133f, -0.10482091f, 0.0379771f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001250 0.072521195f, -0.0029455067f, -0.13797039f, -0.03628521f,
1251 0.013806405f, -0.017858358f, -0.01008298f, -0.07700066f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001252 -0.017081132f, 0.019358726f, 0.0027079724f, 0.004635139f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +00001253 0.062634714f, -0.02338735f, -0.039547626f, -0.02050681f,
1254 0.03385117f, -0.083611414f, 0.002862572f, -0.09421313f,
1255 0.058618143f, -0.08598433f, 0.00972939f, 0.023867095f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001256 -0.053934585f, -0.023203006f, 0.07452513f, -0.048767887f, // 15
1257 -0.07314807f, -0.056307215f, -0.10433547f, -0.06440842f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001258 0.04328182f, 0.04389765f, -0.020006588f, -0.09076438f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001259 -0.11652589f, -0.021705797f, 0.03345259f, -0.010329105f,
1260 -0.025767034f, 0.013057034f, -0.07316461f, -0.10145612f, // 16
Matteo Martincighc7434122018-11-14 12:27:04 +00001261 0.06358255f, 0.18531723f, 0.07759293f, 0.12006465f,
1262 0.1305557f, 0.058638252f, -0.03393652f, 0.09622831f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001263 -0.16253184f, -2.4580743e-06f, 0.079869635f, -0.070196845f,
1264 -0.005644518f, 0.06857898f, -0.12598175f, -0.035084512f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +00001265 0.03156317f, -0.12794146f, -0.031963028f, 0.04692781f,
1266 0.030070418f, 0.0071660685f, -0.095516115f, -0.004643372f,
1267 0.040170413f, -0.062104587f, -0.0037324072f, 0.0554317f,
1268 0.08184801f, -0.019164372f, 0.06791302f, 0.034257166f, // 18
telsoa01ce3e84a2018-08-31 09:31:35 +01001269 -0.10307039f, 0.021943003f, 0.046745934f, 0.0790918f,
1270 -0.0265588f, -0.007824208f, 0.042546265f, -0.00977924f,
1271 -0.0002440307f, -0.017384544f, -0.017990116f, 0.12252321f,
1272 -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f, // 19
Matteo Martincighc7434122018-11-14 12:27:04 +00001273 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001274 -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001275 -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f
1276 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001277 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1278 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001279 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
1280 std::vector<float> recurrentToForgetWeightsValue
1281 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001282 -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f, // 00
Matteo Martincighc7434122018-11-14 12:27:04 +00001283 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001284 -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001285 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
1286 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f, // 01
telsoa01ce3e84a2018-08-31 09:31:35 +01001287 -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
1288 -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001289 0.061878487f, -0.04729229f, 0.034919553f, -0.07585433f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001290 -0.04421272f, -0.044019096f, 0.085488975f, 0.04058006f, // 02
1291 -0.06890133f, -0.030951202f, -0.024628663f, -0.07672815f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001292 0.034293607f, 0.08556707f, -0.05293577f, -0.033561368f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001293 -0.04899627f, 0.0241671f, 0.015736353f, -0.095442444f,
1294 -0.029564252f, 0.016493602f, -0.035026584f, 0.022337519f, // 03
1295 -0.026871363f, 0.004780428f, 0.0077918363f, -0.03601621f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001296 0.016435321f, -0.03263031f, -0.09543275f, -0.047392778f,
1297 0.013454138f, 0.028934088f, 0.01685226f, -0.086110644f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001298 -0.046250615f, -0.01847454f, 0.047608484f, 0.07339695f, // 04
Matteo Martincighc7434122018-11-14 12:27:04 +00001299 0.034546845f, -0.04881143f, 0.009128804f, -0.08802852f,
1300 0.03761666f, 0.008096139f, -0.014454086f, 0.014361001f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001301 -0.023502491f, -0.0011840804f, -0.07607001f, 0.001856849f,
1302 -0.06509276f, -0.006021153f, -0.08570962f, -0.1451793f, // 05
Matteo Martincighc7434122018-11-14 12:27:04 +00001303 0.060212336f, 0.055259194f, 0.06974018f, 0.049454916f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001304 -0.027794661f, -0.08077226f, -0.016179763f, 0.1169753f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001305 0.17213494f, -0.0056326236f, -0.053934924f, -0.0124349f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001306 -0.11520337f, 0.05409887f, 0.088759385f, 0.0019655675f, // 06
Matteo Martincighc7434122018-11-14 12:27:04 +00001307 0.0042065294f, 0.03881498f, 0.019844765f, 0.041858196f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001308 -0.05695512f, 0.047233116f, 0.038937137f, -0.06542224f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001309 0.014429736f, -0.09719407f, 0.13908425f, -0.05379757f,
1310 0.012321099f, 0.082840554f, -0.029899208f, 0.044217527f, // 07
1311 0.059855383f, 0.07711018f, -0.045319796f, 0.0948846f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001312 -0.011724666f, -0.0033288454f, -0.033542685f, -0.04764985f,
1313 -0.13873616f, 0.040668588f, 0.034832682f, -0.015319203f,
1314 -0.018715994f, 0.046002675f, 0.0599172f, -0.043107376f, // 08
Matteo Martincighc7434122018-11-14 12:27:04 +00001315 0.0294216f, -0.002314414f, -0.022424703f, 0.0030315618f,
1316 0.0014641669f, 0.0029166266f, -0.11878115f, 0.013738511f,
1317 0.12375372f, -0.0006038222f, 0.029104086f, 0.087442465f,
1318 0.052958444f, 0.07558703f, 0.04817258f, 0.044462286f, // 09
telsoa01ce3e84a2018-08-31 09:31:35 +01001319 -0.015213451f, -0.08783778f, -0.0561384f, -0.003008196f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001320 0.047060397f, -0.002058388f, 0.03429439f, -0.018839769f,
1321 0.024734668f, 0.024614193f, -0.042046934f, 0.09597743f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001322 -0.0043254104f, 0.04320769f, 0.0064070094f, -0.0019131786f, // 10
1323 -0.02558259f, -0.022822596f, -0.023273505f, -0.02464396f,
1324 -0.10991725f, -0.006240552f, 0.0074488563f, 0.024044557f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001325 0.04383914f, -0.046476185f, 0.028658995f, 0.060410924f,
1326 0.050786525f, 0.009452605f, -0.0073054377f, -0.024810238f, // 11
1327 0.0052906186f, 0.0066939713f, -0.0020913032f, 0.014515517f,
1328 0.015898481f, 0.021362653f, -0.030262267f, 0.016587038f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001329 -0.011442813f, 0.041154444f, -0.007631438f, -0.03423484f,
1330 -0.010977775f, 0.036152758f, 0.0066366293f, 0.11915515f, // 12
Matteo Martincighc7434122018-11-14 12:27:04 +00001331 0.02318443f, -0.041350313f, 0.021485701f, -0.10906167f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001332 -0.028218046f, -0.00954771f, 0.020531068f, -0.11995105f,
1333 -0.03672871f, 0.024019798f, 0.014255957f, -0.05221243f,
1334 -0.00661567f, -0.04630967f, 0.033188973f, 0.10107534f, // 13
1335 -0.014027541f, 0.030796422f, -0.10270911f, -0.035999842f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001336 0.15443139f, 0.07684145f, 0.036571592f, -0.035900835f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001337 -0.0034699554f, 0.06209149f, 0.015920248f, -0.031122351f,
1338 -0.03858649f, 0.01849943f, 0.13872518f, 0.01503974f, // 14
Matteo Martincighc7434122018-11-14 12:27:04 +00001339 0.069941424f, -0.06948533f, -0.0088794185f, 0.061282158f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001340 -0.047401894f, 0.03100163f, -0.041533746f, -0.10430945f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001341 0.044574402f, -0.01425562f, -0.024290353f, 0.034563623f,
1342 0.05866852f, 0.023947537f, -0.09445152f, 0.035450947f, // 15
1343 0.02247216f, -0.0042998926f, 0.061146557f, -0.10250651f,
1344 0.020881841f, -0.06747029f, 0.10062043f, -0.0023941975f,
1345 0.03532124f, -0.016341697f, 0.09685456f, -0.016764693f,
1346 0.051808182f, 0.05875331f, -0.04536488f, 0.001626336f, // 16
telsoa01ce3e84a2018-08-31 09:31:35 +01001347 -0.028892258f, -0.01048663f, -0.009793449f, -0.017093895f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001348 0.010987891f, 0.02357273f, -0.00010856845f, 0.0099760275f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001349 -0.001845119f, -0.03551521f, 0.0018358806f, 0.05763657f,
1350 -0.01769146f, 0.040995963f, 0.02235177f, -0.060430344f, // 17
Matteo Martincighc7434122018-11-14 12:27:04 +00001351 0.11475477f, -0.023854522f, 0.10071741f, 0.0686208f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001352 -0.014250481f, 0.034261297f, 0.047418304f, 0.08562733f,
1353 -0.030519066f, 0.0060542435f, 0.014653856f, -0.038836084f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001354 0.04096551f, 0.032249358f, -0.08355519f, -0.026823482f, // 18
1355 0.056386515f, -0.010401743f, -0.028396193f, 0.08507674f,
1356 0.014410365f, 0.020995233f, 0.17040324f, 0.11511526f,
1357 0.02459721f, 0.0066619175f, 0.025853224f, -0.023133837f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001358 -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f, // 19
1359 -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001360 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f,
1361 -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f
1362 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001363 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1364 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001365 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
1366 std::vector<float> recurrentToCellWeightsValue
1367 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001368 -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001369 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
1370 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001371 -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001372 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
1373 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001374 -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
1375 -0.019443132f, -0.030755889f, -0.0040000007f, 0.04465846f,
1376 -0.021585021f, 0.0031670958f, 0.0053199246f, -0.056117613f,
1377 -0.10893326f, 0.076739706f, -0.08509834f, -0.027997585f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001378 0.037871376f, 0.01449768f, -0.09002357f, -0.06111149f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001379 -0.046195522f, 0.0422062f, -0.005683705f, -0.1253618f,
1380 -0.012925729f, -0.04890792f, 0.06985068f, 0.037654128f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001381 0.03398274f, -0.004781977f, 0.007032333f, -0.031787455f,
1382 0.010868644f, -0.031489216f, 0.09525667f, 0.013939797f,
1383 0.0058680447f, 0.0167067f, 0.02668468f, -0.04797466f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001384 -0.048885044f, -0.12722108f, 0.035304096f, 0.06554885f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001385 0.00972396f, -0.039238118f, -0.05159735f, -0.11329045f,
1386 0.1613692f, -0.03750952f, 0.06529313f, -0.071974665f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001387 -0.11769596f, 0.015524369f, -0.0013754242f, -0.12446318f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001388 0.02786344f, -0.014179351f, 0.005264273f, 0.14376344f,
1389 0.015983658f, 0.03406988f, -0.06939408f, 0.040699873f,
1390 0.02111075f, 0.09669095f, 0.041345075f, -0.08316494f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001391 -0.07684199f, -0.045768797f, 0.032298047f, -0.041805092f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001392 0.0119405f, 0.0061010392f, 0.12652606f, 0.0064572375f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001393 -0.024950314f, 0.11574242f, 0.04508852f, -0.04335324f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001394 0.06760663f, -0.027437469f, 0.07216407f, 0.06977076f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001395 -0.05438599f, 0.034033038f, -0.028602652f, 0.05346137f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001396 0.043184172f, -0.037189785f, 0.10420091f, 0.00882477f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001397 -0.054019816f, -0.074273005f, -0.030617684f, -0.0028467078f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001398 0.024302477f, -0.0038869337f, 0.005332455f, 0.0013399826f,
1399 0.04361412f, -0.007001822f, 0.09631092f, -0.06702025f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001400 -0.042049985f, -0.035070654f, -0.04103342f, -0.10273396f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001401 0.0544271f, 0.037184782f, -0.13150354f, -0.0058036847f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001402 -0.008264958f, 0.042035464f, 0.05891794f, 0.029673764f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001403 0.0063542654f, 0.044788733f, 0.054816857f, 0.062257513f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001404 -0.00093483756f, 0.048938446f, -0.004952862f, -0.007730018f,
1405 -0.04043371f, -0.017094059f, 0.07229206f, -0.023670016f,
1406 -0.052195564f, -0.025616996f, -0.01520939f, 0.045104615f,
1407 -0.007376126f, 0.003533447f, 0.006570588f, 0.056037236f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001408 0.12436656f, 0.051817212f, 0.028532185f, -0.08686856f,
1409 0.11868599f, 0.07663395f, -0.07323171f, 0.03463402f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001410 -0.050708205f, -0.04458982f, -0.11590894f, 0.021273347f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001411 0.1251325f, -0.15313013f, -0.12224372f, 0.17228661f,
1412 0.023029093f, 0.086124025f, 0.006445803f, -0.03496501f,
1413 0.028332196f, 0.04449512f, -0.042436164f, -0.026587414f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001414 -0.006041347f, -0.09292539f, -0.05678812f, 0.03897832f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001415 0.09465633f, 0.008115513f, -0.02171956f, 0.08304309f,
1416 0.071401566f, 0.019622514f, 0.032163795f, -0.004167056f,
1417 0.02295182f, 0.030739572f, 0.056506045f, 0.004612461f,
1418 0.06524936f, 0.059999723f, 0.046395954f, -0.0045512207f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001419 -0.1335546f, -0.030136576f, 0.11584653f, -0.014678886f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001420 0.0020118146f, -0.09688814f, -0.0790206f, 0.039770417f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001421 -0.0329582f, 0.07922767f, 0.029322514f, 0.026405897f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001422 0.04207835f, -0.07073373f, 0.063781224f, 0.0859677f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001423 -0.10925287f, -0.07011058f, 0.048005477f, 0.03438226f,
1424 -0.09606514f, -0.006669445f, -0.043381985f, 0.04240257f,
1425 -0.06955775f, -0.06769346f, 0.043903265f, -0.026784198f,
1426 -0.017840602f, 0.024307009f, -0.040079936f, -0.019946516f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001427 0.045318738f, -0.12233574f, 0.026170589f, 0.0074471775f,
1428 0.15978073f, 0.10185836f, 0.10298046f, -0.015476589f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001429 -0.039390966f, -0.072174534f, 0.0739445f, -0.1211869f,
1430 -0.0347889f, -0.07943156f, 0.014809798f, -0.12412325f,
1431 -0.0030663363f, 0.039695457f, 0.0647603f, -0.08291318f,
1432 -0.018529687f, -0.004423833f, 0.0037507233f, 0.084633216f,
1433 -0.01514876f, -0.056505352f, -0.012800942f, -0.06994386f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001434 0.012962922f, -0.031234352f, 0.07029052f, 0.016418684f,
1435 0.03618972f, 0.055686004f, -0.08663945f, -0.017404709f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001436 -0.054761406f, 0.029065743f, 0.052404847f, 0.020238016f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001437 0.0048197987f, -0.0214882f, 0.07078733f, 0.013016777f,
1438 0.06262858f, 0.009184685f, 0.020785125f, -0.043904778f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001439 -0.0270329f, -0.03299152f, -0.060088247f, -0.015162964f,
1440 -0.001828936f, 0.12642565f, -0.056757294f, 0.013586685f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001441 0.09232601f, -0.035886683f, 0.06000002f, 0.05229691f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001442 -0.052580316f, -0.082029596f, -0.010794592f, 0.012947712f,
1443 -0.036429964f, -0.085508935f, -0.13127148f, -0.017744139f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001444 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001445 -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f,
1446 -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001447 -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f
1448 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001449 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1450 // [num_units, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001451 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1452 std::vector<float> recurrentToOutputWeightsValue
1453 {
1454 0.025825322f, -0.05813119f, 0.09495884f, -0.045984812f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001455 -0.01255415f, -0.0026479573f, -0.08196161f, -0.054914974f,
1456 -0.0046604523f, -0.029587349f, -0.044576716f, -0.07480124f,
1457 -0.082868785f, 0.023254942f, 0.027502948f, -0.0039728214f,
1458 -0.08683098f, -0.08116779f, -0.014675607f, -0.037924774f,
1459 -0.023314456f, -0.007401714f, -0.09255757f, 0.029460307f,
1460 -0.08829125f, -0.005139627f, -0.08989442f, -0.0555066f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001461 0.13596267f, -0.025062224f, -0.048351806f, -0.03850004f,
1462 0.07266485f, -0.022414139f, 0.05940088f, 0.075114764f,
1463 0.09597592f, -0.010211725f, -0.0049794707f, -0.011523867f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001464 -0.025980417f, 0.072999895f, 0.11091378f, -0.081685916f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001465 0.014416728f, 0.043229222f, 0.034178585f, -0.07530371f,
1466 0.035837382f, -0.085607f, -0.007721233f, -0.03287832f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001467 -0.043848954f, -0.06404588f, -0.06632928f, -0.073643476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001468 0.008214239f, -0.045984086f, 0.039764922f, 0.03474462f,
1469 0.060612556f, -0.080590084f, 0.049127717f, 0.04151091f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001470 -0.030063879f, 0.008801774f, -0.023021035f, -0.019558564f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001471 0.05158114f, -0.010947698f, -0.011825728f, 0.0075720972f,
1472 0.0699727f, -0.0039981045f, 0.069350146f, 0.08799282f,
1473 0.016156472f, 0.035502106f, 0.11695009f, 0.006217345f,
1474 0.13392477f, -0.037875112f, 0.025745004f, 0.08940699f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001475 -0.00924166f, 0.0046702605f, -0.036598757f, -0.08811812f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001476 0.10522024f, -0.032441203f, 0.008176899f, -0.04454919f,
1477 0.07058152f, 0.0067963637f, 0.039206743f, 0.03259838f,
1478 0.03725492f, -0.09515802f, 0.013326398f, -0.052055415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001479 -0.025676316f, 0.03198509f, -0.015951829f, -0.058556724f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001480 0.036879618f, 0.043357447f, 0.028362012f, -0.05908629f,
1481 0.0059240665f, -0.04995891f, -0.019187413f, 0.0276265f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001482 -0.01628143f, 0.0025863599f, 0.08800015f, 0.035250366f,
1483 -0.022165963f, -0.07328642f, -0.009415526f, -0.07455109f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001484 0.11690406f, 0.0363299f, 0.07411125f, 0.042103454f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001485 -0.009660886f, 0.019076364f, 0.018299393f, -0.046004917f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001486 0.08891175f, 0.0431396f, -0.026327137f, -0.051502608f,
1487 0.08979574f, -0.051670972f, 0.04940282f, -0.07491107f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001488 -0.021240504f, 0.022596184f, -0.034280192f, 0.060163025f,
1489 -0.058211457f, -0.051837247f, -0.01349775f, -0.04639988f,
1490 -0.035936575f, -0.011681591f, 0.064818054f, 0.0073146066f,
1491 -0.021745546f, -0.043124277f, -0.06471268f, -0.07053354f,
1492 -0.029321948f, -0.05330136f, 0.016933719f, -0.053782392f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001493 0.13747959f, -0.1361751f, -0.11569455f, 0.0033329215f,
1494 0.05693899f, -0.053219706f, 0.063698f, 0.07977434f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001495 -0.07924483f, 0.06936997f, 0.0034815092f, -0.007305279f,
1496 -0.037325785f, -0.07251102f, -0.033633437f, -0.08677009f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001497 0.091591336f, -0.14165086f, 0.021752775f, 0.019683983f,
1498 0.0011612234f, -0.058154266f, 0.049996935f, 0.0288841f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001499 -0.0024567875f, -0.14345716f, 0.010955264f, -0.10234828f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001500 0.1183656f, -0.0010731248f, -0.023590032f, -0.072285876f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001501 -0.0724771f, -0.026382286f, -0.0014920527f, 0.042667855f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001502 0.0018776858f, 0.02986552f, 0.009814309f, 0.0733756f,
1503 0.12289186f, 0.018043943f, -0.0458958f, 0.049412545f,
1504 0.033632483f, 0.05495232f, 0.036686596f, -0.013781798f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001505 -0.010036754f, 0.02576849f, -0.08307328f, 0.010112348f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001506 0.042521734f, -0.05869831f, -0.071689695f, 0.03876447f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001507 -0.13275425f, -0.0352966f, -0.023077697f, 0.10285965f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001508 0.084736146f, 0.15568255f, -0.00040734606f, 0.027835453f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001509 -0.10292561f, -0.032401145f, 0.10053256f, -0.026142767f,
1510 -0.08271222f, -0.0030240538f, -0.016368777f, 0.1070414f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001511 0.042672627f, 0.013456989f, -0.0437609f, -0.022309763f,
1512 0.11576483f, 0.04108048f, 0.061026827f, -0.0190714f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001513 -0.0869359f, 0.037901703f, 0.0610107f, 0.07202949f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001514 0.01675338f, 0.086139716f, -0.08795751f, -0.014898893f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001515 -0.023771819f, -0.01965048f, 0.007955471f, -0.043740474f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001516 0.03346837f, -0.10549954f, 0.090567775f, 0.042013682f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001517 -0.03176985f, 0.12569028f, -0.02421228f, -0.029526481f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001518 0.023851605f, 0.031539805f, 0.05292009f, -0.02344001f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001519 -0.07811758f, -0.08834428f, 0.10094801f, 0.16594367f,
1520 -0.06861939f, -0.021256343f, -0.041093912f, -0.06669611f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001521 0.035498552f, 0.021757556f, -0.09302526f, -0.015403468f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001522 -0.06614931f, -0.051798206f, -0.013874718f, 0.03630673f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001523 0.010412845f, -0.08077351f, 0.046185967f, 0.0035662893f,
1524 0.03541868f, -0.094149634f, -0.034814864f, 0.003128424f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001525 -0.020674974f, -0.03944324f, -0.008110165f, -0.11113267f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001526 0.08484226f, 0.043586485f, 0.040582247f, 0.0968012f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001527 -0.065249965f, -0.028036479f, 0.0050708856f, 0.0017462453f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001528 0.0326779f, 0.041296225f, 0.09164146f, -0.047743853f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001529 -0.015952192f, -0.034451712f, 0.084197424f, -0.05347844f,
1530 -0.11768019f, 0.085926116f, -0.08251791f, -0.045081906f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001531 0.0948852f, 0.068401024f, 0.024856757f, 0.06978981f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001532 -0.057309967f, -0.012775832f, -0.0032452994f, 0.01977615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001533 -0.041040014f, -0.024264973f, 0.063464895f, 0.05431621f
1534 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001535 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001536 hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
1537 std::vector<float> cellToInputWeightsValue
1538 {
1539 0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
1540 -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
1541 -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f, -0.052169047f,
1542 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f
1543 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001544 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001545 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1546 std::vector<float> cellToForgetWeightsValue
1547 {
1548 -0.01998659f, -0.15568835f, -0.24248174f, -0.012770197f, 0.041331276f,
1549 -0.072311886f, -0.052123554f, -0.0066330447f, -0.043891653f, 0.036225766f,
1550 -0.047248036f, 0.021479502f, 0.033189066f, 0.11952997f, -0.020432774f,
1551 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f
1552 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001553 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001554 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1555 std::vector<float> cellToOutputWeightsValue
1556 {
1557 0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
1558 -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
1559 -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
1560 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f
1561 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001562 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001563 hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
1564 std::vector<float> inputGateBiasValue
1565 {
1566 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
1567 -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
1568 -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
1569 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f
1570 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001571 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001572 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1573 std::vector<float> forgetGateBiasValue
1574 {
1575 0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f, 0.11098921f,
1576 0.15378423f, 0.09263801f, 0.09790885f, 0.09508917f, 0.061199076f,
1577 0.07665568f, -0.015443159f, -0.03499149f, 0.046190713f, 0.08895977f,
1578 0.10899629f, 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f
1579 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001580 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001581 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1582 std::vector<float> cellBiasValue
1583 {
1584 -0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f, -0.1483596f,
1585 -0.10639995f, -0.091433935f, 0.058573797f, -0.06809782f, -0.07889636f,
1586 -0.043246906f, -0.09829136f, -0.4279842f, 0.034901652f, 0.18797937f,
1587 0.0075234566f, 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f
1588 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001589 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001590 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1591 std::vector<float> outputGateBiasValue
1592 {
1593 0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
1594 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
1595 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
1596 -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f
1597 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001598 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1599 // [output_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001600 hidl_vec<uint32_t> projectionWeightsDimensions{outputSize, numUnits};
1601 std::vector<float> projectionWeightsValue
1602 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001603 -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001604 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001605 -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
1606 -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001607 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
1608 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
1609 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
1610 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001611 -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
1612 -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
1613 -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001614 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
1615 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
1616 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
1617 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
1618 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001619 -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001620 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001621 -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001622 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001623 -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
1624 -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001625 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001626 -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001627 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001628 -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
1629 -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001630 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001631 -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
1632 -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
1633 -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001634 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
1635 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001636 -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001637 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
1638 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
1639 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
1640 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
1641 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001642 -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
1643 -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001644 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001645 -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
1646 -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001647 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
1648 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
1649 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001650 -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
1651 -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
1652 -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001653 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001654 -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001655 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
1656 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001657 -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
1658 -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
1659 -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001660 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001661 -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
1662 -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
1663 -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001664 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
1665 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
1666 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f
1667 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001668 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001669 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
1670 std::vector<float> projectionBiasValue(outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001671
1672 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001673 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1674 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001675 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001676 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1677 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001678
Matteo Martincighc7434122018-11-14 12:27:04 +00001679 // Constant scalar values (the VTS test adds these as tensors of dim {})
telsoa01ce3e84a2018-08-31 09:31:35 +01001680 // 20: The activation function: A value indicating the activation function:
1681 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
Matteo Martincighc7434122018-11-14 12:27:04 +00001682 hidl_vec<uint32_t> activationFunctionDimensions{};
1683 std::vector<int32_t> activationFunctionValue{4};
telsoa01ce3e84a2018-08-31 09:31:35 +01001684 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1685 // If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001686 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
1687 std::vector<float> cellClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001688 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1689 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
Matteo Martincighc7434122018-11-14 12:27:04 +00001690 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
1691 std::vector<float> projectionClippingThresholdValue{0.0f};
telsoa01ce3e84a2018-08-31 09:31:35 +01001692
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001693 // Normalization:
1694 // 23:The input layer normalization weights. A 1-D tensor of shape [num_units].
1695 // Used to rescale normalized inputs to activation at input gate.
1696 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
1697 std::vector<float> inputLayerNormWeightsValue;
1698 // 24:The forget layer normalization weights. A 1-D tensor of shape [num_units].
1699 // Used to rescale normalized inputs to activation at forget gate.
1700 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
1701 std::vector<float> forgetLayerNormWeightsValue;
1702 // 25:The cell layer normalization weights. A 1-D tensor of shape [num_units].
1703 // Used to rescale normalized inputs to activation at cell gate.
1704 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1705 std::vector<float> cellLayerNormWeightsValue;
1706 // 26:The output layer normalization weights. A 1-D tensor of shape [num_units].
1707 // Used to rescale normalized inputs to activation at output gate.
1708 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1709 std::vector<float> outputLayerNormWeightsValue;
1710
telsoa01ce3e84a2018-08-31 09:31:35 +01001711 // Outputs:
1712 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
1713 // CIFG, or [batch_size, num_units * 3] without CIFG.
Matteo Martincighc7434122018-11-14 12:27:04 +00001714 // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
1715 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
1716 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
1717 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
1718 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
1719 std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
telsoa01ce3e84a2018-08-31 09:31:35 +01001720 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Matteo Martincighc7434122018-11-14 12:27:04 +00001721 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
1722 std::vector<float> outputStateOutValue
1723 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001724 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835577f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001725 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415119f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001726 -0.013869f, 0.0287268f, -0.00334694f, 0.00733397f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001727 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.0216801f
1728 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001729 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Matteo Martincighc7434122018-11-14 12:27:04 +00001730 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1731 std::vector<float> cellStateOutValue
1732 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001733 -0.0531632f, -0.0118138f, 0.0870833f, 0.0347929f, -0.076144f,
1734 -0.0659219f, -0.0463811f, 0.0141307f, -0.0127706f, -0.03782f,
1735 -0.00402401f, -0.00571876f, -0.187957f, -0.0247127f, 0.0711425f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001736 0.008244f, 0.0492649f, 0.126972f, 0.0933097f, 0.29848f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001737 -0.0966178f, -0.114417f, 0.0387229f, 0.0453255f, -0.181286f,
1738 -0.0651251f, -0.0996879f, -0.00276995f, 0.0617558f, -0.0100728f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001739 0.056304f, -0.077416f, -0.162858f, -0.0541251f, 0.0571202f,
1740 -0.0525331f, 0.0724297f, 0.171029f, 0.141738f, 0.295483f
1741 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001742 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1743 // effectively the same as the current “output state (out)” value.
Matteo Martincighc7434122018-11-14 12:27:04 +00001744 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
1745 std::vector<float> outputValue
1746 {
telsoa01ce3e84a2018-08-31 09:31:35 +01001747 -0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, -0.0211779f, 0.0283512f, -0.0114597f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001748 0.00907307f, -0.0244004f, -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, 0.0134203f,
telsoa01ce3e84a2018-08-31 09:31:35 +01001749 -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, -0.0186926f, 0.0193662f, -0.0115437f,
Matteo Martincighc7434122018-11-14 12:27:04 +00001750 0.00422612f, -0.0345232f, 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f
1751 };
telsoa01ce3e84a2018-08-31 09:31:35 +01001752
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001753 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1754 inputToInputWeightsDimensions, inputToInputWeightsValue,
1755 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1756 inputToCellWeightsDimensions, inputToCellWeightsValue,
1757 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1758 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1759 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1760 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1761 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1762 cellToInputWeightsDimensions, cellToInputWeightsValue,
1763 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1764 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1765 inputGateBiasDimensions, inputGateBiasValue,
1766 forgetGateBiasDimensions, forgetGateBiasValue,
1767 cellBiasDimensions, cellBiasValue,
1768 outputGateBiasDimensions, outputGateBiasValue,
1769 projectionWeightsDimensions, projectionWeightsValue,
1770 projectionBiasDimensions, projectionBiasValue,
1771 outputStateInDimensions, outputStateInValue,
1772 cellStateInDimensions, cellStateInValue,
1773 activationFunctionDimensions, activationFunctionValue,
1774 cellClippingThresholdDimensions, cellClippingThresholdValue,
1775 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
1776 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1777 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1778 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1779 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1780 scratchBufferDimensions, scratchBufferValue,
1781 outputStateOutDimensions, outputStateOutValue,
1782 cellStateOutDimensions, cellStateOutValue,
1783 outputDimensions, outputValue,
1784 compute);
telsoa01ce3e84a2018-08-31 09:31:35 +01001785}
1786
/// End-to-end LSTM driver test: CIFG enabled (no input gate), peephole connections
/// enabled, no projection layer, no layer normalization, batch size 2.
///
/// @tparam HalPolicy  HAL version policy (1.0/1.1/1.2) used by LstmTestImpl to build the model.
/// @param  compute    Backend to run on (e.g. armnn::Compute::CpuRef / CpuAcc / GpuAcc).
///
/// Empty dimensions ({0}) with empty value vectors denote omitted optional tensors;
/// expected output values below are the reference results from the VTS/TfLite test data.
template <typename HalPolicy>
void LstmCifgPeepholeNoProjectionBatch2(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/lstm2.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/lstm2.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
    // The batch size has been increased to 2 (it was 1 in the VTS test) with appropriate input and output values added.

    uint32_t batchSize = 2;
    uint32_t inputSize = 2;
    uint32_t numUnits = 4;
    uint32_t outputSize = numUnits; // no projection layer, so output width equals the cell width

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{2.0f, 3.0f, 3.0f, 4.0f};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    //     Omitted here because CIFG is enabled (forget gate drives the input gate).
    hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
    std::vector<float> inputToInputWeightsValue;
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{-0.55291498f, -0.42866567f,
                                                  0.13056988f, -0.36333650f,
                                                 -0.22755712f,  0.28253698f,
                                                  0.24407166f,  0.33826375f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.49770179f, -0.27711356f,
                                               -0.09624726f,  0.05100781f,
                                                0.04717243f,  0.48944736f,
                                               -0.38535351f, -0.17212132f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{ 0.10725588f, -0.02335852f,
                                                 -0.55932593f, -0.09426838f,
                                                 -0.44257352f,  0.54939759f,
                                                  0.01533556f,  0.42751634f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    //     Omitted because CIFG is enabled.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0}; // VTS was {4, 4} -> {0} ?
    std::vector<float> recurrentToInputWeightsValue;
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.13832897f, -0.05151010f, -0.23590070f, -0.16661474f,
                                                     -0.14340827f,  0.36986142f,  0.23414481f,  0.55899000f,
                                                      0.10798943f, -0.41174671f,  0.17751795f, -0.34484994f,
                                                     -0.35874045f, -0.11352962f,  0.27268326f,  0.54058349f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{ 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f,
                                                    0.42957711f,  0.01841056f, -0.32764608f, -0.33027974f,
                                                   -0.10826075f,  0.20675004f,  0.19069612f, -0.03026325f,
                                                   -0.54532051f,  0.33003211f,  0.44901288f,  0.21193194f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{0.41613156f, 0.42610586f, -0.16495961f, -0.56638730f,
                                                     0.30579174f, -0.05115908f, -0.33941799f, 0.23364776f,
                                                     0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
                                                     0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Omitted because CIFG is enabled (no input gate to peep into).
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
    std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
    std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    //     Omitted because CIFG is enabled.
    hidl_vec<uint32_t> inputGateBiasDimensions{0}; // VTS was {4} -> {0} ?
    std::vector<float> inputGateBiasValue;
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue{1.0f, 1.0f, 1.0f, 1.0f};
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue(numUnits, 0.0f);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue(numUnits, 0.0f);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units]. Omitted: this test has no projection layer.
    hidl_vec<uint32_t> projectionWeightsDimensions{0};
    std::vector<float> projectionWeightsValue;
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<float> projectionBiasValue;

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4}; // Tanh
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f};

    // Normalization: all four layer-norm weight tensors omitted (layer norm disabled in this test).
    // 23:The input layer normalization weights. A 1-D tensor of shape [num_units].
    //    Used to rescale normalized inputs to activation at input gate.
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
    std::vector<float> inputLayerNormWeightsValue;
    // 24:The forget layer normalization weights. A 1-D tensor of shape [num_units].
    //    Used to rescale normalized inputs to activation at forget gate.
    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
    std::vector<float> forgetLayerNormWeightsValue;
    // 25:The cell layer normalization weights. A 1-D tensor of shape [num_units].
    //    Used to rescale normalized inputs to activation at cell gate.
    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
    std::vector<float> cellLayerNormWeightsValue;
    // 26:The output layer normalization weights. A 1-D tensor of shape [num_units].
    //    Used to rescale normalized inputs to activation at output gate.
    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
    std::vector<float> outputLayerNormWeightsValue;

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    //    HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
    //    Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //              android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //              tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
    std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<float> outputStateOutValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
                                           -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<float> cellStateOutValue{-0.76044439f, -0.01804161f, 0.18226376f, -0.06493707f,
                                         -0.90477051f, -0.04355603f, 0.18475688f, -0.04158677f};
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<float> outputValue{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
                                   -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f};

    // Build, compile and execute the model, then compare against the expected outputs above.
    LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
                            inputToInputWeightsDimensions, inputToInputWeightsValue,
                            inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                            inputToCellWeightsDimensions, inputToCellWeightsValue,
                            inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                            recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                            recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                            recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                            recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                            cellToInputWeightsDimensions, cellToInputWeightsValue,
                            cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                            cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                            inputGateBiasDimensions, inputGateBiasValue,
                            forgetGateBiasDimensions, forgetGateBiasValue,
                            cellBiasDimensions, cellBiasValue,
                            outputGateBiasDimensions, outputGateBiasValue,
                            projectionWeightsDimensions, projectionWeightsValue,
                            projectionBiasDimensions, projectionBiasValue,
                            outputStateInDimensions, outputStateInValue,
                            cellStateInDimensions, cellStateInValue,
                            activationFunctionDimensions, activationFunctionValue,
                            cellClippingThresholdDimensions, cellClippingThresholdValue,
                            projectionClippingThresholdDimensions, projectionClippingThresholdValue,
                            inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
                            forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
                            cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
                            outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
                            scratchBufferDimensions, scratchBufferValue,
                            outputStateOutDimensions, outputStateOutValue,
                            cellStateOutDimensions, cellStateOutValue,
                            outputDimensions, outputValue,
                            compute);
}
Matteo Martincighc7434122018-11-14 12:27:04 +00001980
/// End-to-end LSTM driver test: CIFG disabled (full input gate present), peephole
/// connections enabled, projection layer enabled (outputSize != numUnits), no clipping,
/// and layer normalization enabled on all four gates.
///
/// @tparam HalPolicy  HAL version policy (1.0/1.1/1.2) used by LstmTestImpl to build the model.
/// @param  compute    Backend to run on (e.g. armnn::Compute::CpuRef / CpuAcc / GpuAcc).
///
/// Expected output values below are the reference results from the VTS layer_norm_lstm test data.
template <typename HalPolicy>
void LstmNoCifgPeepholeProjectionNoClippingLayerNorm(armnn::Compute compute)
{
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/layer_norm_lstm.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).

    uint32_t batchSize = 2;
    uint32_t inputSize = 5;
    uint32_t numUnits = 4;
    uint32_t outputSize = 3; // differs from numUnits because the projection layer is enabled

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<float> inputValue{ 0.7f, 0.8f, 0.1f, 0.2f, 0.3f,  // batch 0
                                   0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; // batch 1

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToInputWeightsValue{ 0.5,  0.6,  0.7, -0.8, -0.9,
                                                 0.1,  0.2,  0.3, -0.4,  0.5,
                                                -0.8,  0.7, -0.6,  0.5, -0.4,
                                                -0.5, -0.4, -0.3, -0.2, -0.1};
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{-0.6, -0.1,  0.3,  0.2,  0.9,
                                                 -0.5, -0.2, -0.4,  0.3, -0.8,
                                                 -0.4,  0.3, -0.5, -0.4, -0.6,
                                                  0.3, -0.4, -0.6, -0.5, -0.5};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.4, -0.3, -0.2, -0.1, -0.5,
                                                0.5, -0.2, -0.3, -0.2, -0.6,
                                                0.6, -0.1, -0.4, -0.3, -0.7,
                                                0.7, -0.9, -0.5,  0.8,  0.6};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{-0.8, -0.4, -0.2, -0.9, -0.1,
                                                 -0.7,  0.3, -0.3, -0.8, -0.2,
                                                  0.6, -0.2,  0.4, -0.7, -0.3,
                                                 -0.5,  0.1,  0.5, -0.6, -0.4};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToInputWeightsValue{-0.2,  -0.3,  0.4,
                                                     0.1,  -0.5,  0.9,
                                                    -0.2,  -0.3, -0.7,
                                                     0.05, -0.2, -0.6};
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.5, -0.3, -0.5,
                                                     -0.2,  0.6,  0.4,
                                                      0.9,  0.3, -0.1,
                                                      0.2,  0.5,  0.2};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{-0.3,  0.2,  0.1,
                                                   -0.3,  0.8, -0.08,
                                                   -0.2,  0.3,  0.8,
                                                   -0.6, -0.1,  0.2};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{ 0.3, -0.1,  0.1,
                                                     -0.2, -0.5, -0.7,
                                                     -0.2, -0.6, -0.1,
                                                     -0.4, -0.7, -0.2};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
    std::vector<float> cellToInputWeightsValue{0.05, 0.1, 0.25, 0.15};
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
    std::vector<float> cellToForgetWeightsValue{-0.02, -0.15, -0.25, -0.03};
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
    std::vector<float> cellToOutputWeightsValue{0.1, -0.1, -0.5, 0.05};
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
    std::vector<float> inputGateBiasValue{0.03, 0.15, 0.22, 0.38};
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue{0.1, -0.3, -0.2, 0.1};
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue{-0.05, 0.72, 0.25, 0.08};
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue{0.05, -0.01, 0.2, 0.1};
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
    std::vector<float> projectionWeightsValue{-0.1,  0.2,  0.01,
                                              -0.2,  0.1,  0.5,
                                               0.3,  0.08, 0.07,
                                               0.2, -0.4,  0.2};
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
    std::vector<float> projectionBiasValue(outputSize, 0.0f);
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t> activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4}; // Tanh
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> cellClippingThresholdDimensions{};
    std::vector<float> cellClippingThresholdValue{0.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
    std::vector<float> projectionClippingThresholdValue{0.0f};

    // Normalization:
    // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at input gate.
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
    std::vector<float> inputLayerNormWeightsValue{0.1, 0.2, 0.3, 0.5};
    // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at forget gate.
    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
    std::vector<float> forgetLayerNormWeightsValue{0.2, 0.2, 0.4, 0.3};
    // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at cell gate.
    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
    std::vector<float> cellLayerNormWeightsValue{0.7, 0.2, 0.3, 0.8};
    // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at output gate.
    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
    std::vector<float> outputLayerNormWeightsValue{0.6, 0.2, 0.2, 0.5};

    // Outputs:
    // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //    CIFG, or [batch_size, num_units * 3] without CIFG.
    //    HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
    //    Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
    //              android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
    //              tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
    hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 4};
    std::vector<float> scratchBufferValue(batchSize * numUnits * 4, 0.0f);
    // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
    std::vector<float> outputStateOutValue { 0.02440767f, 0.12802738f, -0.00170918f,
                                            -0.00692428f, 0.08487406f,  0.06344498f};
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<float> cellStateOutValue {-0.45177122f, 0.37691566f, 0.22542511f, 0.23240635f,
                                          -0.25258583f, 0.33042118f, 0.01730525f, 0.36660123f};
    // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //    effectively the same as the current “output state (out)” value.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<float> outputValue{ 0.02440767f, 0.12802738f, -0.00170918f,
                                   -0.00692428f, 0.08487406f,  0.06344498f};

    // Build, compile and execute the model, then compare against the expected outputs above.
    LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
                            inputToInputWeightsDimensions, inputToInputWeightsValue,
                            inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                            inputToCellWeightsDimensions, inputToCellWeightsValue,
                            inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                            recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                            recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                            recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                            recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                            cellToInputWeightsDimensions, cellToInputWeightsValue,
                            cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                            cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                            inputGateBiasDimensions, inputGateBiasValue,
                            forgetGateBiasDimensions, forgetGateBiasValue,
                            cellBiasDimensions, cellBiasValue,
                            outputGateBiasDimensions, outputGateBiasValue,
                            projectionWeightsDimensions, projectionWeightsValue,
                            projectionBiasDimensions, projectionBiasValue,
                            outputStateInDimensions, outputStateInValue,
                            cellStateInDimensions, cellStateInValue,
                            activationFunctionDimensions, activationFunctionValue,
                            cellClippingThresholdDimensions, cellClippingThresholdValue,
                            projectionClippingThresholdDimensions, projectionClippingThresholdValue,
                            inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
                            forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
                            cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
                            outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
                            scratchBufferDimensions, scratchBufferValue,
                            outputStateOutDimensions, outputStateOutValue,
                            cellStateOutDimensions, cellStateOutValue,
                            outputDimensions, outputValue,
                            compute);
}
2182
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002183template <typename HalPolicy>
2184void LstmCifgPeepholeProjectionNoClippingLayerNorm(armnn::Compute compute)
Matteo Martincighc7434122018-11-14 12:27:04 +00002185{
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002186 // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/layer_norm_lstm.model.cpp
2187 // with values from android/frameworks/ml/nn/runtime/test/generated/examples/layer_norm_lstm.example.cpp
2188 // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).
Matteo Martincighc7434122018-11-14 12:27:04 +00002189
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002190 uint32_t batchSize = 2;
2191 uint32_t inputSize = 5;
2192 uint32_t numUnits = 4;
2193 uint32_t outputSize = 3;
Matteo Martincighc7434122018-11-14 12:27:04 +00002194
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002195 // Inputs:
2196 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
2197 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
2198 hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
2199 std::vector<float> inputValue{ 0.7f, 0.8f, 0.1f, 0.2f, 0.3f, // batch 0
2200 0.3f, 0.2f, 0.9f, 0.8f, 0.1f}; // batch 1
telsoa01ce3e84a2018-08-31 09:31:35 +01002201
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002202 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2203 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
2204 hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
2205 std::vector<float> inputToInputWeightsValue;
2206 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2207 // [num_units, input_size].
2208 hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
2209 std::vector<float> inputToForgetWeightsValue{-0.6, -0.1, 0.3, 0.2, 0.9,
2210 -0.5, -0.2, -0.4, 0.3, -0.8,
2211 -0.4, 0.3, -0.5, -0.4, -0.6,
2212 0.3, -0.4, -0.6, -0.5, -0.5};
2213 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
2214 hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
2215 std::vector<float> inputToCellWeightsValue{-0.4, -0.3, -0.2, -0.1, -0.5,
2216 0.5, -0.2, -0.3, -0.2, -0.6,
2217 0.6, -0.1, -0.4, -0.3, -0.7,
2218 0.7, -0.9, -0.5, 0.8, 0.6};
2219 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2220 // [num_units, input_size].
2221 hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
2222 std::vector<float> inputToOutputWeightsValue{-0.8, -0.4, -0.2, -0.9, -0.1,
2223 -0.7, 0.3, -0.3, -0.8, -0.2,
2224 0.6, -0.2, 0.4, -0.7, -0.3,
2225 -0.5, 0.1, 0.5, -0.6, -0.4};
2226 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2227 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
2228 // “num_units”), or the second dimension of the “projection_weights”, if defined.
2229 hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0};
2230 std::vector<float> recurrentToInputWeightsValue;
2231 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2232 // [num_units, output_size].
2233 hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
2234 std::vector<float> recurrentToForgetWeightsValue{-0.5, -0.3, -0.5,
2235 -0.2, 0.6, 0.4,
2236 0.9, 0.3, -0.1,
2237 0.2, 0.5, 0.2};
2238 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2239 // [num_units, output_size].
2240 hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
2241 std::vector<float> recurrentToCellWeightsValue{-0.3, 0.2, 0.1,
2242 -0.3, 0.8,-0.08,
2243 -0.2, 0.3, 0.8,
2244 -0.6, -0.1, 0.2};
2245 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2246 // [num_units, output_size].
2247 hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
2248 std::vector<float> recurrentToOutputWeightsValue{ 0.3, -0.1, 0.1,
2249 -0.2, -0.5, -0.7,
2250 -0.2, -0.6, -0.1,
2251 -0.4, -0.7, -0.2};
2252 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2253 hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
2254 std::vector<float> cellToInputWeightsValue;
2255 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2256 hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
2257 std::vector<float> cellToForgetWeightsValue{-0.02, -0.15, -0.25, -0.03};
2258 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2259 hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
2260 std::vector<float> cellToOutputWeightsValue{0.1, -0.1, -0.5, 0.05};
2261 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2262 hidl_vec<uint32_t> inputGateBiasDimensions{0};
2263 std::vector<float> inputGateBiasValue;
2264 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2265 hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
2266 std::vector<float> forgetGateBiasValue{0.1, -0.3, -0.2, 0.1};
2267 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2268 hidl_vec<uint32_t> cellBiasDimensions{numUnits};
2269 std::vector<float> cellBiasValue{-0.05, 0.72, 0.25, 0.08};
2270 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2271 hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
2272 std::vector<float> outputGateBiasValue{0.05, -0.01, 0.2, 0.1};
2273 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2274 // [output_size, num_units].
2275 hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
2276 std::vector<float> projectionWeightsValue{-0.1, 0.2, 0.01,
2277 -0.2, 0.1, 0.5,
2278 0.3, 0.08, 0.07,
2279 0.2, -0.4, 0.2};
2280 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
2281 hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
2282 std::vector<float> projectionBiasValue(outputSize, 0.0f);
2283 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2284 hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
2285 std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
2286 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2287 hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
2288 std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
2289
2290 // Constant scalar values (the VTS test adds these as tensors of dim {})
2291 // 20: The activation function: A value indicating the activation function:
2292 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
2293 hidl_vec<uint32_t> activationFunctionDimensions{};
2294 std::vector<int32_t> activationFunctionValue{4};
2295 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
2296 // If set to 0.0 then clipping is disabled.
2297 hidl_vec<uint32_t> cellClippingThresholdDimensions{};
2298 std::vector<float> cellClippingThresholdValue{0.0f};
2299 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
2300 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2301 hidl_vec<uint32_t> projectionClippingThresholdDimensions{};
2302 std::vector<float> projectionClippingThresholdValue{0.0f};
2303
2304 // Normalization:
2305 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
2306 // Used to rescale normalized inputs to activation at input gate.
2307 hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
2308 std::vector<float> inputLayerNormWeightsValue{0.1, 0.2, 0.3, 0.5};
2309 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
2310 // Used to rescale normalized inputs to activation at forget gate.
2311 hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
2312 std::vector<float> forgetLayerNormWeightsValue{0.2, 0.2, 0.4, 0.3};
2313 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
2314 // Used to rescale normalized inputs to activation at cell gate.
2315 hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
2316 std::vector<float> cellLayerNormWeightsValue{0.7, 0.2, 0.3, 0.8};
2317 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
2318 // Used to rescale normalized inputs to activation at output gate.
2319 hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
2320 std::vector<float> outputLayerNormWeightsValue{0.6, 0.2, 0.2, 0.5};
2321
2322 // Outputs:
2323 // 0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
2324 // CIFG, or [batch_size, num_units * 3] without CIFG.
2325 // HOWEVER, by looking at the code, seems that it's the opposite: (cifg ? 3 : 4) * numUnits
2326 // Refer to: android/frameworks/ml/nn/common/operations/LSTM.cpp:319
2327 // android/frameworks/ml/nn/common/operations/LSTMTest.cpp:114
2328 // tensorflow/tensorflow/contrib/lite/kernels/lstm.cc:332
2329 hidl_vec<uint32_t> scratchBufferDimensions{batchSize, numUnits * 3};
2330 std::vector<float> scratchBufferValue(batchSize * numUnits * 3, 0.0f);
2331 // 1: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2332 hidl_vec<uint32_t> outputStateOutDimensions{batchSize, outputSize};
2333 std::vector<float> outputStateOutValue { 0.02129706f, 0.14081624f, 0.01127331f,
2334 -0.02263505f, 0.09169482f, 0.07691758f};
2335 // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2336 hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
2337 std::vector<float> cellStateOutValue{-0.35102980f, 0.42610350f, 0.21463650f, 0.27716520f,
2338 -0.18855170f, 0.32522000f, 0.02036650f, 0.48967660f};
2339 // 3: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
2340 // effectively the same as the current “output state (out)” value.
2341 hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
2342 std::vector<float> outputValue{ 0.02129706f, 0.14081624f, 0.01127331f,
2343 -0.02263505f, 0.09169482f, 0.07691758f};
2344
2345 LstmTestImpl<HalPolicy>(inputDimensions, inputValue,
2346 inputToInputWeightsDimensions, inputToInputWeightsValue,
2347 inputToForgetWeightsDimensions, inputToForgetWeightsValue,
2348 inputToCellWeightsDimensions, inputToCellWeightsValue,
2349 inputToOutputWeightsDimensions, inputToOutputWeightsValue,
2350 recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
2351 recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
2352 recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
2353 recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
2354 cellToInputWeightsDimensions, cellToInputWeightsValue,
2355 cellToForgetWeightsDimensions, cellToForgetWeightsValue,
2356 cellToOutputWeightsDimensions, cellToOutputWeightsValue,
2357 inputGateBiasDimensions, inputGateBiasValue,
2358 forgetGateBiasDimensions, forgetGateBiasValue,
2359 cellBiasDimensions, cellBiasValue,
2360 outputGateBiasDimensions, outputGateBiasValue,
2361 projectionWeightsDimensions, projectionWeightsValue,
2362 projectionBiasDimensions, projectionBiasValue,
2363 outputStateInDimensions, outputStateInValue,
2364 cellStateInDimensions, cellStateInValue,
2365 activationFunctionDimensions, activationFunctionValue,
2366 cellClippingThresholdDimensions, cellClippingThresholdValue,
2367 projectionClippingThresholdDimensions, projectionClippingThresholdValue,
2368 inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
2369 forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
2370 cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
2371 outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
2372 scratchBufferDimensions, scratchBufferValue,
2373 outputStateOutDimensions, outputStateOutValue,
2374 cellStateOutDimensions, cellStateOutValue,
2375 outputDimensions, outputValue,
2376 compute);
2377}
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01002378
template <typename HalPolicy>
void QuantizedLstm(armnn::Compute compute)
{
    // Exercises the ANEURALNETWORKS_QUANTIZED_16BIT_LSTM operation (no CIFG, no peephole,
    // no projection, no layer norm) through the driver, comparing against golden outputs.
    // NOTE(review): 'compute' is deliberately unused here — presumably the backend is fixed
    // elsewhere for this test; confirm against the callers before removing the parameter.
    armnn::IgnoreUnused(compute);
    // This replicates android/frameworks/ml/nn/runtime/test/generated/vts_models/quantized_lstm.model.cpp
    // with values from android/frameworks/ml/nn/runtime/test/generated/examples/quantized_lstm.example.cpp
    // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of MODEL_INPUT tensors).

    uint32_t batchSize = 2;
    uint32_t inputSize = 2;
    uint32_t outputSize = 4;

    // Inputs:
    // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
    //    specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
    hidl_vec<uint32_t> inputDimensions{batchSize, inputSize};
    std::vector<uint8_t> inputValue{166, 179, 50, 150};

    // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{outputSize, inputSize};
    std::vector<uint8_t> inputToInputWeightsValue{146, 250, 235, 171, 10, 218, 171, 108};
    // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{outputSize, inputSize};
    std::vector<uint8_t> inputToForgetWeightsValue{24, 50, 132, 179, 158, 110, 3, 169};
    // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> inputToCellWeightsDimensions{outputSize, inputSize};
    std::vector<uint8_t> inputToCellWeightsValue{133, 34, 29, 49, 206, 109, 54, 183};
    // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{outputSize, inputSize};
    std::vector<uint8_t> inputToOutputWeightsValue{195, 187, 11, 99, 109, 10, 218, 48};
    // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{outputSize, outputSize};
    std::vector<uint8_t> recurrentToInputWeightsValue{254, 206, 77, 168, 71, 20, 215, 6,
                                                      223, 7, 118, 225, 59, 130, 174, 26};
    // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{outputSize, outputSize};
    std::vector<uint8_t> recurrentToForgetWeightsValue{137, 240, 103, 52, 68, 51, 237, 112,
                                                       0, 220, 89, 23, 69, 4, 207, 253};
    // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{outputSize, outputSize};
    std::vector<uint8_t> recurrentToCellWeightsValue{172, 60, 205, 65, 14, 0, 140, 168,
                                                     240, 223, 133, 56, 142, 64, 246, 216};
    // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{outputSize, outputSize};
    std::vector<uint8_t> recurrentToOutputWeightsValue{106, 214, 67, 23, 59, 158, 45, 3,
                                                       119, 132, 49, 205, 129, 218, 11, 98};
    // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
    //    bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //    of input and weights scales and zeroPoint equal to 0.
    hidl_vec<uint32_t> inputGateBiasDimensions{outputSize};
    std::vector<int32_t> inputGateBiasValue{-7876, 13488, -726, 32839};
    // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    hidl_vec<uint32_t> forgetGateBiasDimensions{outputSize};
    std::vector<int32_t> forgetGateBiasValue{9206, -46884, -11693, -38724};
    // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
    //     for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
    //     and weights scales and zeroPoint equal to 0.
    hidl_vec<uint32_t> cellBiasDimensions{outputSize};
    std::vector<int32_t> cellBiasValue{39481, 48624, 48976, -21419};
    // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    hidl_vec<uint32_t> outputGateBiasDimensions{outputSize};
    std::vector<int32_t> outputGateBiasValue{-58999, -17050, -41852, -40538};

    // 13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
    //     [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
    //     It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
    hidl_vec<uint32_t> previousCellStateInDimensions{batchSize, outputSize};
    std::vector<int16_t> previousCellStateInValue{876, 1034, 955, -909, 761, 1029, 796, -1036};
    // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //     [numBatches, outputSize] specifying the output of the LSTM cell from previous time-step. Tensor
    //     is quantized with a fixed quantization range of -1, 127/128.
    hidl_vec<uint32_t> previousOutputInDimensions{batchSize, outputSize};
    std::vector<uint8_t> previousOutputInValue{136, 150, 140, 115, 135, 152, 138, 112};

    // Expected (golden) outputs:
    // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
    //    which contains a cell state from the current time step. Tensor is quantized using a quantization range
    //    of -2^4, 2^4 * 32767/32768.
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, outputSize};
    std::vector<int16_t> cellStateOutValue {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235};
    // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
    //    contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
    hidl_vec<uint32_t> outputDimensions{batchSize, outputSize};
    std::vector<uint8_t> outputValue {140, 151, 146, 112, 136, 156, 142, 112};


    // Build the model, execute it and compare the produced cell state / output against the golden values.
    // Note the argument order: previous output comes before previous cell state.
    QuantizedLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
                                     inputToInputWeightsDimensions, inputToInputWeightsValue,
                                     inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                                     inputToCellWeightsDimensions, inputToCellWeightsValue,
                                     inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                                     recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                                     recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                                     recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                                     recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                                     inputGateBiasDimensions, inputGateBiasValue,
                                     forgetGateBiasDimensions, forgetGateBiasValue,
                                     cellBiasDimensions, cellBiasValue,
                                     outputGateBiasDimensions, outputGateBiasValue,
                                     previousOutputInDimensions, previousOutputInValue,
                                     previousCellStateInDimensions, previousCellStateInValue,
                                     cellStateOutDimensions, cellStateOutValue,
                                     outputDimensions, outputValue);
}