//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonLstmFloatWorkload.hpp"
#include "NeonWorkloadUtils.hpp"

#include "aclCommon/ArmComputeTensorUtils.hpp"

#include <armnn/utility/NumericCast.hpp>

#include "neon/NeonTensorHandle.hpp"

namespace armnn
{
using namespace armcomputetensorutils;

NeonLstmFloatWorkload::NeonLstmFloatWorkload(const LstmQueueDescriptor &descriptor, const WorkloadInfo &info)
    : FloatWorkload<LstmQueueDescriptor>(descriptor, info)
{
    arm_compute::LSTMParams<arm_compute::ITensor> lstm_param;
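    // lstm_param collects the optional tensors (CIFG, projection, peephole, layer normalisation);
    // the mandatory tensors are passed directly to NELSTMLayer::configure() below.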

    // Basic parameters
    m_InputToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo());

    m_InputToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo());

    m_InputToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo());

    m_RecurrentToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTensorInfo());

    m_RecurrentToCellWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorInfo());

    m_RecurrentToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTensorInfo());

    m_ForgetGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo());

    m_CellBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo());

    m_OutputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo());

    // When CIFG (coupled input and forget gate) is disabled, the input gate has its own weights and
    // bias; see the Android NN API LSTM operation for the underlying logic.
    if (!m_Data.m_Parameters.m_CifgEnabled)
    {
        m_InputToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo());

        m_RecurrentToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTensorInfo());

        m_CellToInputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        if (m_Data.m_CellToInputWeights != nullptr)
        {
            BuildArmComputeTensor(*m_CellToInputWeightsTensor, m_Data.m_CellToInputWeights->GetTensorInfo());
        }

        m_InputGateBiasTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo());

        lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),
                                   m_RecurrentToInputWeightsTensor.get(),
                                   m_Data.m_CellToInputWeights != nullptr ? m_CellToInputWeightsTensor.get() : nullptr,
                                   m_InputGateBiasTensor.get());
    }

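    // Optional projection parameters; the projection bias may be absent even when projection is enabled.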
    if (m_Data.m_Parameters.m_ProjectionEnabled)
    {
        m_ProjectionWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_ProjectionWeightsTensor, m_Data.m_ProjectionWeights->GetTensorInfo());

        m_ProjectionBiasTensor = std::make_unique<arm_compute::Tensor>();
        if (m_Data.m_ProjectionBias != nullptr)
        {
            BuildArmComputeTensor(*m_ProjectionBiasTensor, m_Data.m_ProjectionBias->GetTensorInfo());
        }

        lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),
                                         m_Data.m_ProjectionBias != nullptr ? m_ProjectionBiasTensor.get() : nullptr);
    }

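    // Optional peephole connections: cell-to-forget and cell-to-output weights.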
    if (m_Data.m_Parameters.m_PeepholeEnabled)
    {
        m_CellToForgetWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellToForgetWeightsTensor, m_Data.m_CellToForgetWeights->GetTensorInfo());

        m_CellToOutputWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellToOutputWeightsTensor, m_Data.m_CellToOutputWeights->GetTensorInfo());

        lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
    }

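    // Optional layer-normalisation weights; the input layer-norm weights only exist when CIFG is disabled.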
    if (m_Data.m_Parameters.m_LayerNormEnabled)
    {
        m_InputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        if (!m_Data.m_Parameters.m_CifgEnabled)
        {
            BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights->GetTensorInfo());
        }

        m_ForgetLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights->GetTensorInfo());

        m_CellLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights->GetTensorInfo());

        m_OutputLayerNormWeightsTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights->GetTensorInfo());

        lstm_param.set_layer_normalization_params(m_Data.m_Parameters.m_CifgEnabled ?
                                                      nullptr : m_InputLayerNormWeightsTensor.get(),
                                                  m_ForgetLayerNormWeightsTensor.get(),
                                                  m_CellLayerNormWeightsTensor.get(),
                                                  m_OutputLayerNormWeightsTensor.get());
    }

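    // Fetch the ACL tensors backing the workload inputs and outputs. Outputs[0] (the scratch buffer slot)
    // is not consumed here; a local scratch tensor is created below instead.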
    const arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    const arm_compute::ITensor& output_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    const arm_compute::ITensor& cell_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();

    arm_compute::ITensor& output_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
    arm_compute::ITensor& cell_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();
    arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[3])->GetTensor();

    // Get the batch_size and the num_units from the cellStateIn dimensions
    const TensorInfo& inputTensorInfo = info.m_InputTensorInfos[2];
    const unsigned int batch_size = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[0]);
    const unsigned int num_units = armnn::numeric_cast<unsigned int>(inputTensorInfo.GetShape()[1]);

    m_ScratchBuffer = std::make_unique<arm_compute::Tensor>();
    if (m_Data.m_Parameters.m_CifgEnabled)
    {
        // 2D scratch buffer with dimensions [num_units * 3, batch_size] when CIFG is enabled
        armnn::TensorInfo scratchBuffer1({ batch_size, num_units * 3 }, DataType::Float32);
        BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer1);
    }
    else
    {
        // 2D scratch buffer with dimensions [num_units * 4, batch_size] when CIFG is disabled
        armnn::TensorInfo scratchBuffer2({ batch_size, num_units * 4 }, DataType::Float32);
        BuildArmComputeTensor(*m_ScratchBuffer, scratchBuffer2);
    }

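    // Cell-state and projection clipping thresholds; a value of 0 leaves clipping disabled in Compute Library.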
    float cell_threshold = m_Data.m_Parameters.m_ClippingThresCell;
    float projection_threshold = m_Data.m_Parameters.m_ClippingThresProj;

    // Map the ArmNN activation function index to an ACL ActivationLayerInfo; five cases are supported.
    arm_compute::ActivationLayerInfo activationLayerInfo;
    if (m_Data.m_Parameters.m_ActivationFunc == 0)
    {
        // no activation, do nothing
    }
    else if (m_Data.m_Parameters.m_ActivationFunc == 1)
    {
        activationLayerInfo = arm_compute::ActivationLayerInfo(
            arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
    }
    else if (m_Data.m_Parameters.m_ActivationFunc == 3)
    {
        activationLayerInfo = arm_compute::ActivationLayerInfo(
            arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
    }
    else if (m_Data.m_Parameters.m_ActivationFunc == 4)
    {
        activationLayerInfo = arm_compute::ActivationLayerInfo(
            arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
    }
    else if (m_Data.m_Parameters.m_ActivationFunc == 6)
    {
        activationLayerInfo = arm_compute::ActivationLayerInfo(
            arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
    }
    else
    {
        throw armnn::Exception("Wrong Type of Activation Function!");
    }

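    // Wire the mandatory tensors, the optional parameters collected in lstm_param, the activation
    // and the clipping thresholds into the Compute Library LSTM function.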
    m_LstmLayer.configure(&input, m_InputToForgetWeightsTensor.get(), m_InputToCellWeightsTensor.get(),
                          m_InputToOutputWeightsTensor.get(), m_RecurrentToForgetWeightsTensor.get(),
                          m_RecurrentToCellWeightsTensor.get(), m_RecurrentToOutputWeightsTensor.get(),
                          m_ForgetGateBiasTensor.get(), m_CellBiasTensor.get(), m_OutputGateBiasTensor.get(),
                          &output_state_in, &cell_state_in, m_ScratchBuffer.get(), &output_state_out,
                          &cell_state_out, &output, lstm_param, activationLayerInfo,
                          cell_threshold, projection_threshold);

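    // Allocate the scratch buffer and copy the constant weights and biases from the ArmNN handles
    // into the ACL tensors configured above.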
196 armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);
197
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +0100198 InitializeArmComputeTensorData(*m_InputToForgetWeightsTensor,
199 m_Data.m_InputToForgetWeights);
200 InitializeArmComputeTensorData(*m_InputToCellWeightsTensor,
201 m_Data.m_InputToCellWeights);
202 InitializeArmComputeTensorData(*m_InputToOutputWeightsTensor,
203 m_Data.m_InputToOutputWeights);
204 InitializeArmComputeTensorData(*m_RecurrentToForgetWeightsTensor,
205 m_Data.m_RecurrentToForgetWeights);
206 InitializeArmComputeTensorData(*m_RecurrentToCellWeightsTensor,
207 m_Data.m_RecurrentToCellWeights);
208 InitializeArmComputeTensorData(*m_RecurrentToOutputWeightsTensor,
209 m_Data.m_RecurrentToOutputWeights);
210 InitializeArmComputeTensorData(*m_ForgetGateBiasTensor,
211 m_Data.m_ForgetGateBias);
212 InitializeArmComputeTensorData(*m_CellBiasTensor,
213 m_Data.m_CellBias);
214 InitializeArmComputeTensorData(*m_OutputGateBiasTensor,
215 m_Data.m_OutputGateBias);
Les Bellde9011b2018-10-03 10:37:52 +0100216
217 if (!m_Data.m_Parameters.m_CifgEnabled)
218 {
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +0100219 InitializeArmComputeTensorData(*m_InputToInputWeightsTensor,
220 m_Data.m_InputToInputWeights);
221 InitializeArmComputeTensorData(*m_RecurrentToInputWeightsTensor,
222 m_Data.m_RecurrentToInputWeights);
Les Bellde9011b2018-10-03 10:37:52 +0100223 if (m_Data.m_CellToInputWeights != nullptr)
224 {
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +0100225 InitializeArmComputeTensorData(*m_CellToInputWeightsTensor,
226 m_Data.m_CellToInputWeights);
Les Bellde9011b2018-10-03 10:37:52 +0100227 }
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +0100228 InitializeArmComputeTensorData(*m_InputGateBiasTensor,
229 m_Data.m_InputGateBias);
Les Bellde9011b2018-10-03 10:37:52 +0100230 }
231
232 if (m_Data.m_Parameters.m_ProjectionEnabled)
233 {
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +0100234 InitializeArmComputeTensorData(*m_ProjectionWeightsTensor,
235 m_Data.m_ProjectionWeights);
Les Bellde9011b2018-10-03 10:37:52 +0100236 if (m_Data.m_ProjectionBias != nullptr)
237 {
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +0100238 InitializeArmComputeTensorData(*m_ProjectionBiasTensor,
239 m_Data.m_ProjectionBias);
Les Bellde9011b2018-10-03 10:37:52 +0100240 }
241 }
242
243 if (m_Data.m_Parameters.m_PeepholeEnabled)
244 {
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +0100245 InitializeArmComputeTensorData(*m_CellToForgetWeightsTensor,
246 m_Data.m_CellToForgetWeights);
247 InitializeArmComputeTensorData(*m_CellToOutputWeightsTensor,
248 m_Data.m_CellToOutputWeights);
Les Bellde9011b2018-10-03 10:37:52 +0100249 }
250
Jan Eilersad5293a2019-07-08 09:57:55 +0100251 if (m_Data.m_Parameters.m_LayerNormEnabled)
252 {
253 if (!m_Data.m_Parameters.m_CifgEnabled)
254 {
255 InitializeArmComputeTensorData(*m_InputLayerNormWeightsTensor, m_Data.m_InputLayerNormWeights);
256 }
257 InitializeArmComputeTensorData(*m_ForgetLayerNormWeightsTensor, m_Data.m_ForgetLayerNormWeights);
258 InitializeArmComputeTensorData(*m_CellLayerNormWeightsTensor, m_Data.m_CellLayerNormWeights);
259 InitializeArmComputeTensorData(*m_OutputLayerNormWeightsTensor, m_Data.m_OutputLayerNormWeights);
260 }
261
Les Bellde9011b2018-10-03 10:37:52 +0100262 // Force Compute Library to perform the necessary copying and reshaping, after which
263 // delete all the input tensors that will no longer be needed
264 m_LstmLayer.prepare();
265 FreeUnusedTensors();
arovir019e53a352018-08-31 15:26:35 +0100266}

void NeonLstmFloatWorkload::Execute() const
{
    m_LstmLayer.run();
}

arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo& input,
                                                  const TensorInfo& outputStateIn,
                                                  const TensorInfo& cellStateIn,
                                                  const TensorInfo& scratchBuffer,
                                                  const TensorInfo& outputStateOut,
                                                  const TensorInfo& cellStateOut,
                                                  const TensorInfo& output,
                                                  const LstmDescriptor& descriptor,
                                                  const LstmInputParamsInfo& paramsInfo)
{
    arm_compute::LSTMParams<arm_compute::ITensorInfo> lstm_params_info;

    // The inputs and outputs
    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutputStateInInfo = BuildArmComputeTensorInfo(outputStateIn);
    const arm_compute::TensorInfo aclCellStateInInfo = BuildArmComputeTensorInfo(cellStateIn);
    const arm_compute::TensorInfo aclScratchBufferInfo = BuildArmComputeTensorInfo(scratchBuffer);
    const arm_compute::TensorInfo aclOutputStateOutInfo = BuildArmComputeTensorInfo(outputStateOut);
    const arm_compute::TensorInfo aclCellStateOutInfo = BuildArmComputeTensorInfo(cellStateOut);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);

    // Basic parameters
    const arm_compute::TensorInfo aclInputToForgetWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetInputToForgetWeights());
    const arm_compute::TensorInfo aclInputToCellWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetInputToCellWeights());
    const arm_compute::TensorInfo aclInputToOutputWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetInputToOutputWeights());
    const arm_compute::TensorInfo aclRecurrentToForgetWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToForgetWeights());
    const arm_compute::TensorInfo aclRecurrentToCellWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToCellWeights());
    const arm_compute::TensorInfo aclRecurrentToOutputWeightsInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToOutputWeights());
    const arm_compute::TensorInfo aclForgetGateBiasInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetForgetGateBias());
    const arm_compute::TensorInfo aclCellBiasInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetCellBias());
    const arm_compute::TensorInfo aclOutputGateBiasInfo
        = BuildArmComputeTensorInfo(paramsInfo.GetOutputGateBias());

    arm_compute::TensorInfo aclInputToInputWeightsInfo;
    arm_compute::TensorInfo aclRecurrentToInputWeightsInfo;
    arm_compute::TensorInfo aclCellToInputWeightsInfo;
    arm_compute::TensorInfo aclInputGateBiasInfo;
    arm_compute::TensorInfo aclProjectionWeightsInfo;
    arm_compute::TensorInfo aclProjectionBiasInfo;
    arm_compute::TensorInfo aclCellToForgetWeightsInfo;
    arm_compute::TensorInfo aclCellToOutputWeightsInfo;

    arm_compute::TensorInfo aclInputLayerNormWeightsInfo;
    arm_compute::TensorInfo aclForgetLayerNormWeightsInfo;
    arm_compute::TensorInfo aclCellLayerNormWeightsInfo;
    arm_compute::TensorInfo aclOutputLayerNormWeightsInfo;

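    // Optional parameter infos are only populated, and attached to lstm_params_info, when the
    // corresponding feature is enabled in the descriptor.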
    if (!descriptor.m_CifgEnabled)
    {
        if (descriptor.m_PeepholeEnabled)
        {
            aclCellToInputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToInputWeights());
        }
        aclInputToInputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputToInputWeights());
        aclRecurrentToInputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetRecurrentToInputWeights());
        aclInputGateBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputGateBias());

        lstm_params_info.set_cifg_params(&aclInputToInputWeightsInfo, &aclRecurrentToInputWeightsInfo,
                                         descriptor.m_PeepholeEnabled ? &aclCellToInputWeightsInfo : nullptr,
                                         &aclInputGateBiasInfo);
    }

    if (descriptor.m_ProjectionEnabled)
    {
        if (paramsInfo.m_ProjectionBias != nullptr)
        {
            aclProjectionBiasInfo = BuildArmComputeTensorInfo(paramsInfo.GetProjectionBias());
        }
        aclProjectionWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetProjectionWeights());

        lstm_params_info.set_projection_params(&aclProjectionWeightsInfo,
                                               paramsInfo.m_ProjectionBias != nullptr ?
                                               &aclProjectionBiasInfo : nullptr);
    }

    if (descriptor.m_PeepholeEnabled)
    {
        aclCellToForgetWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToForgetWeights());
        aclCellToOutputWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellToOutputWeights());

        lstm_params_info.set_peephole_params(&aclCellToForgetWeightsInfo, &aclCellToOutputWeightsInfo);
    }

    if (descriptor.m_LayerNormEnabled)
    {
        if (!descriptor.m_CifgEnabled)
        {
            aclInputLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetInputLayerNormWeights());
        }
        aclForgetLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetForgetLayerNormWeights());
        aclCellLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetCellLayerNormWeights());
        aclOutputLayerNormWeightsInfo = BuildArmComputeTensorInfo(paramsInfo.GetOutputLayerNormWeights());

        lstm_params_info.set_layer_normalization_params(descriptor.m_CifgEnabled ?
                                                            nullptr : &aclInputLayerNormWeightsInfo,
                                                        &aclForgetLayerNormWeightsInfo,
                                                        &aclCellLayerNormWeightsInfo,
                                                        &aclOutputLayerNormWeightsInfo);
    }

    float cell_threshold = descriptor.m_ClippingThresCell;
    float projection_threshold = descriptor.m_ClippingThresProj;

    // Map the ArmNN activation function index to an ACL ActivationLayerInfo; five cases are supported.
    arm_compute::ActivationLayerInfo activationLayerInfo;
    switch (descriptor.m_ActivationFunc)
    {
        case 0:
            // no activation, do nothing
            break;
        case 1:
            activationLayerInfo = arm_compute::ActivationLayerInfo(
                arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
            break;
        case 3:
            activationLayerInfo = arm_compute::ActivationLayerInfo(
                arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
            break;
        case 4:
            activationLayerInfo = arm_compute::ActivationLayerInfo(
                arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
            break;
        case 6:
            activationLayerInfo = arm_compute::ActivationLayerInfo(
                arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
            break;
        default:
            throw armnn::Exception("Wrong Type of Activation Function!");
    }

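    // Delegate the final check to Compute Library's NELSTMLayer validation.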
    return arm_compute::NELSTMLayer::validate(&aclInputInfo,
                                              &aclInputToForgetWeightsInfo,
                                              &aclInputToCellWeightsInfo,
                                              &aclInputToOutputWeightsInfo,
                                              &aclRecurrentToForgetWeightsInfo,
                                              &aclRecurrentToCellWeightsInfo,
                                              &aclRecurrentToOutputWeightsInfo,
                                              &aclForgetGateBiasInfo,
                                              &aclCellBiasInfo,
                                              &aclOutputGateBiasInfo,
                                              &aclOutputStateInInfo,
                                              &aclCellStateInInfo,
                                              &aclScratchBufferInfo,
                                              &aclOutputStateOutInfo,
                                              &aclCellStateOutInfo,
                                              &aclOutputInfo,
                                              lstm_params_info,
                                              activationLayerInfo,
                                              cell_threshold,
                                              projection_threshold);
}

void NeonLstmFloatWorkload::FreeUnusedTensors()
{
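    // prepare() has already imported the constant tensors into the layer, so any that are no
    // longer referenced can be released here.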
    FreeTensorIfUnused(m_InputToInputWeightsTensor);
    FreeTensorIfUnused(m_InputToForgetWeightsTensor);
    FreeTensorIfUnused(m_InputToCellWeightsTensor);
    FreeTensorIfUnused(m_InputToOutputWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToInputWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToForgetWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToCellWeightsTensor);
    FreeTensorIfUnused(m_RecurrentToOutputWeightsTensor);
    FreeTensorIfUnused(m_CellToInputWeightsTensor);
    FreeTensorIfUnused(m_CellToForgetWeightsTensor);
    FreeTensorIfUnused(m_CellToOutputWeightsTensor);
    FreeTensorIfUnused(m_InputGateBiasTensor);
    FreeTensorIfUnused(m_ForgetGateBiasTensor);
    FreeTensorIfUnused(m_CellBiasTensor);
    FreeTensorIfUnused(m_OutputGateBiasTensor);
    FreeTensorIfUnused(m_ProjectionWeightsTensor);
    FreeTensorIfUnused(m_ProjectionBiasTensor);
    FreeTensorIfUnused(m_ScratchBuffer);
    FreeTensorIfUnused(m_InputLayerNormWeightsTensor);
    FreeTensorIfUnused(m_ForgetLayerNormWeightsTensor);
    FreeTensorIfUnused(m_CellLayerNormWeightsTensor);
    FreeTensorIfUnused(m_OutputLayerNormWeightsTensor);
}

} //namespace armnn