/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#include "arm_compute/runtime/CL/functions/CLLSTMLayer.h"
25
26#include "arm_compute/core/PixelValue.h"
27#include "arm_compute/core/Utils.h"
28#include "arm_compute/core/Validate.h"
29#include "arm_compute/core/utils/misc/ShapeCalculator.h"
30#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
31#include "arm_compute/runtime/CL/CLScheduler.h"
32
33#include <cmath>
34#include <memory>
35#include <tuple>
36
37using namespace arm_compute;
38using namespace arm_compute::misc::shape_calculator;
39
CLLSTMLayer::CLLSTMLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _fully_connected_input_gate(), _gemm_input_gate1(), _gemm_input_gate2(), _transpose_input_gate1(), _transpose_input_gate2(), _accum_input_gate1(),
      _accum_input_gate2(), _subtract_input_gate(), _activation_input_gate(), _fully_connected_forget_gate(), _gemm_forget_gate1(), _gemm_forget_gate2(), _transpose_forget_gate1(),
      _transpose_forget_gate2(), _accum_forget_gate1(), _accum_forget_gate2(), _activation_forget_gate(), _fully_connected_cell_state(), _gemm_cell_state1(), _gemm_cell_state2(), _transpose_cell_state1(),
      _accum_cell_state1(), _accum_cell_state2(), _pixelwise_mul_cell_state1(), _activation_cell_state(), _cell_clip(), _pixelwise_mul_cell_state2(), _fully_connected_output(), _gemm_output1(),
      _gemm_output2(), _transpose_output1(), _transpose_output2(), _accum_output1(), _accum_output2(), _activation_output(), _activation_output_state(), _pixelwise_mul_output_state(),
      _fully_connected_output_state(), _gemm_output_state(), _accum_output_state(), _projection_clip(), _copy_cell_state(), _copy_output(), _concat_scratch_buffer(), _input_gate_out1(), _input_gate_out2(),
      _input_gate_out3(), _input_gate_out4(), _input_gate_out5(), _input_gate_out6(), _forget_gate_out1(), _forget_gate_out2(), _forget_gate_out3(), _forget_gate_out4(), _forget_gate_out5(),
      _forget_gate_out6(), _cell_state_out1(), _cell_state_out2(), _cell_state_out3(), _cell_state_out4(), _cell_state_out5(), _output1(), _output2(), _output3(), _output4(), _output5(), _output6(),
      _cell_state_activation(), _output_projection1(), _ones(), _run_peephole_opt(false), _run_cifg_opt(false), _perform_cell_clipping(false), _has_projection_weights(false),
      _perform_projection_clipping(false)
{
}

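// configure() builds a single LSTM step out of CL primitives: each gate combines a fully
// connected layer on the input (bias folded in), a GEMM of the previous output state against
// the transposed recurrent weights and, optionally, a peephole GEMM against the cell state,
// followed by a logistic activation. Intermediate tensors are handed to the memory group so
// their backing memory can be reused once each sub-result has been consumed.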
void CLLSTMLayer::configure(const ICLTensor *input, const ICLTensor *input_to_forget_weights, const ICLTensor *input_to_cell_weights, const ICLTensor *input_to_output_weights,
                            const ICLTensor *recurrent_to_forget_weights, const ICLTensor *recurrent_to_cell_weights, const ICLTensor *recurrent_to_output_weights,
                            const ICLTensor *forget_gate_bias, const ICLTensor *cell_bias, const ICLTensor *output_gate_bias,
                            ICLTensor *output_state, ICLTensor *cell_state, ICLTensor *scratch_buffer, ICLTensor *output, const LSTMParams<ICLTensor> &lstm_params, const ActivationLayerInfo &activation_info,
                            float cell_threshold, float projection_threshold)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights,
                                 forget_gate_bias, cell_bias, output_gate_bias, output_state, cell_state);
    LSTMParams<ITensorInfo> lstm_params_info;
    if(lstm_params.has_peephole_opt())
    {
        lstm_params_info.set_peephole_params(lstm_params.cell_to_input_weights()->info(), lstm_params.cell_to_forget_weights()->info(), lstm_params.cell_to_output_weights()->info());
    }
    if(lstm_params.has_projection())
    {
        lstm_params_info.set_projection_params(lstm_params.projection_weights()->info(), lstm_params.projection_bias()->info());
    }
    if(!lstm_params.has_cifg_opt())
    {
        lstm_params_info.set_cifg_params(lstm_params.input_to_input_weights()->info(), lstm_params.recurrent_to_input_weights()->info(),
                                         lstm_params.cell_to_input_weights()->info(), lstm_params.input_gate_bias()->info());
    }
    ARM_COMPUTE_ERROR_THROW_ON(CLLSTMLayer::validate(input->info(), input_to_forget_weights->info(),
                                                     input_to_cell_weights->info(), input_to_output_weights->info(),
                                                     recurrent_to_forget_weights->info(), recurrent_to_cell_weights->info(), recurrent_to_output_weights->info(),
                                                     forget_gate_bias->info(), cell_bias->info(), output_gate_bias->info(),
                                                     output_state->info(), cell_state->info(), scratch_buffer->info(), output->info(), lstm_params_info,
                                                     activation_info, cell_threshold, projection_threshold));

    const TensorShape cell_state_shape = cell_state->info()->tensor_shape();

    TensorShape forget_gate1_shape = compute_transposed_shape(*recurrent_to_output_weights->info());
    TensorShape forget_gate2_shape = compute_transposed_shape(*forget_gate_bias->info());
    TensorShape forget_gate3_shape{ 1, output_state->info()->dimension(1) };
    _forget_gate_out1.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _forget_gate_out2.allocator()->init(TensorInfo(forget_gate1_shape, 1, input->info()->data_type()));
    _forget_gate_out3.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _forget_gate_out6.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));

    // Configure block that calculates the forget gate
    // forget_gate = Activation(input * input_to_forget_weights + output_state * recurrent_to_forget_weights + cell_state * cell_to_forget_weights + forget_gate_bias)
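    // The terms are computed as: a fully connected layer for the input term (with the bias folded
    // in), a GEMM of output_state against the transposed recurrent weights, and, when the
    // peephole optimization is enabled, a GEMM of cell_state against the transposed peephole
    // weights; the sums are saturating, and the result goes through a logistic activation.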
    _memory_group.manage(&_forget_gate_out1);
    _fully_connected_forget_gate.configure(input, input_to_forget_weights, forget_gate_bias, &_forget_gate_out1, true, false);
    _memory_group.manage(&_forget_gate_out2);
    _transpose_forget_gate1.configure(recurrent_to_forget_weights, &_forget_gate_out2);
    _memory_group.manage(&_forget_gate_out3);
    _gemm_forget_gate1.configure(output_state, &_forget_gate_out2, nullptr, &_forget_gate_out3, 1.f, 0.f);
    _forget_gate_out2.allocator()->allocate();
    _memory_group.manage(&_forget_gate_out6);
    _accum_forget_gate1.configure(&_forget_gate_out1, &_forget_gate_out3, &_forget_gate_out6, ConvertPolicy::SATURATE);
    CLTensor *forget_gate_out = &_forget_gate_out6;

    if(lstm_params.has_peephole_opt())
    {
        _forget_gate_out4.allocator()->init(TensorInfo(forget_gate2_shape, 1, input->info()->data_type()));
        _forget_gate_out5.allocator()->init(TensorInfo(forget_gate3_shape, 1, input->info()->data_type()));

        _run_peephole_opt = true;
        _memory_group.manage(&_forget_gate_out4);
        _transpose_forget_gate2.configure(lstm_params.cell_to_forget_weights(), &_forget_gate_out4);
        _memory_group.manage(&_forget_gate_out5);
        _gemm_forget_gate2.configure(cell_state, &_forget_gate_out4, nullptr, &_forget_gate_out5, 1.f, 0.f);
        _forget_gate_out4.allocator()->allocate();
        _accum_forget_gate2.configure(&_forget_gate_out6, &_forget_gate_out5, &_forget_gate_out3, ConvertPolicy::SATURATE);
        _forget_gate_out5.allocator()->allocate();
        _forget_gate_out6.allocator()->allocate();
        forget_gate_out = &_forget_gate_out3;
    }
    else
    {
        _forget_gate_out3.allocator()->allocate();
    }
    _activation_forget_gate.configure(forget_gate_out, &_forget_gate_out1, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
    forget_gate_out->allocator()->allocate();

    TensorShape input_gate3_shape{ 1, output_state->info()->dimension(1) };
    _input_gate_out1.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _input_gate_out5.allocator()->init(TensorInfo(input_gate3_shape, 1, input->info()->data_type()));

    // Configure block that calculates the input gate
    // input_gate = Activation(input * input_to_input_weights + output_state * recurrent_to_input_weights + cell_state * cell_to_input_weights + input_gate_bias), without CIFG
    // input_gate = 1 - forget_gate, with CIFG
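    // With CIFG (coupled input and forget gate) the input gate has no weights of its own: it is
    // derived from the forget gate, so only an element-wise subtraction from a tensor of ones
    // (filled at run time) is configured on that path.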
    if(lstm_params.has_cifg_opt())
    {
        _memory_group.manage(&_input_gate_out1);
        _ones.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
        _subtract_input_gate.configure(&_ones, &_forget_gate_out1, &_input_gate_out1, ConvertPolicy::SATURATE);
        _ones.allocator()->allocate();
        _run_cifg_opt = true;
    }
    else
    {
        TensorShape input_gate1_shape = compute_transposed_shape(*recurrent_to_output_weights->info());
        TensorShape input_gate2_shape = compute_transposed_shape(*lstm_params.cell_to_input_weights()->info());

        _input_gate_out2.allocator()->init(TensorInfo(input_gate1_shape, 1, input->info()->data_type()));
        _input_gate_out3.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
        _input_gate_out4.allocator()->init(TensorInfo(input_gate2_shape, 1, input->info()->data_type()));
        _input_gate_out6.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));

        _memory_group.manage(&_input_gate_out1);
        _fully_connected_input_gate.configure(input, lstm_params.input_to_input_weights(), lstm_params.input_gate_bias(), &_input_gate_out1, true, false);
        _memory_group.manage(&_input_gate_out2);
        _transpose_input_gate1.configure(lstm_params.recurrent_to_input_weights(), &_input_gate_out2);
        _memory_group.manage(&_input_gate_out3);
        _gemm_input_gate1.configure(output_state, &_input_gate_out2, nullptr, &_input_gate_out3, 1.f, 0.f);
        _input_gate_out2.allocator()->allocate();
        _memory_group.manage(&_input_gate_out4);
        _transpose_input_gate2.configure(lstm_params.cell_to_input_weights(), &_input_gate_out4);
        _memory_group.manage(&_input_gate_out5);
        _gemm_input_gate2.configure(cell_state, &_input_gate_out4, nullptr, &_input_gate_out5, 1.f, 0.f);
        _input_gate_out4.allocator()->allocate();
        _memory_group.manage(&_input_gate_out6);
        _accum_input_gate1.configure(&_input_gate_out1, &_input_gate_out3, &_input_gate_out6, ConvertPolicy::SATURATE);
        _input_gate_out3.allocator()->allocate();
        _accum_input_gate2.configure(&_input_gate_out6, &_input_gate_out5, &_input_gate_out1, ConvertPolicy::SATURATE);
        _input_gate_out5.allocator()->allocate();
        _input_gate_out6.allocator()->allocate();
        _activation_input_gate.configure(&_input_gate_out1, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
    }

    TensorShape cell_state1_shape = compute_transposed_shape(*recurrent_to_output_weights->info());
    _cell_state_out1.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _cell_state_out2.allocator()->init(TensorInfo(cell_state1_shape, 1, input->info()->data_type()));
    _cell_state_out3.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _cell_state_out4.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _cell_state_out5.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));

    // Configure block that calculates the cell state
    // cell_state = Clip((PixelwiseMul(input_gate, Activation(input * input_to_cell_weights + output_state * recurrent_to_cell_weights + cell_bias)) + PixelwiseMul(forget_gate, cell_state)), cell_threshold)
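    // The candidate cell state is a fully connected layer on the input plus a GEMM of
    // output_state against the transposed recurrent weights, passed through the user-supplied
    // activation; it is then gated by the input gate and added to the forget-gated old cell state.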
    _memory_group.manage(&_cell_state_out1);
    _fully_connected_cell_state.configure(input, input_to_cell_weights, cell_bias, &_cell_state_out1, true, false);
    _memory_group.manage(&_cell_state_out2);
    _transpose_cell_state1.configure(recurrent_to_cell_weights, &_cell_state_out2);
    _memory_group.manage(&_cell_state_out3);
    _gemm_cell_state1.configure(output_state, &_cell_state_out2, nullptr, &_cell_state_out3, 1.f, 0.f);
    _cell_state_out2.allocator()->allocate();
    _memory_group.manage(&_cell_state_out4);
    _accum_cell_state1.configure(&_cell_state_out1, &_cell_state_out3, &_cell_state_out4, ConvertPolicy::SATURATE);
    _activation_cell_state.configure(&_cell_state_out4, nullptr, activation_info);
    _memory_group.manage(&_cell_state_out5);
    _pixelwise_mul_cell_state1.configure(&_cell_state_out4, &_input_gate_out1, &_cell_state_out5, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
    _input_gate_out1.allocator()->allocate();
    _cell_state_out4.allocator()->allocate();
    _pixelwise_mul_cell_state2.configure(&_forget_gate_out1, cell_state, &_cell_state_out3, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
    _forget_gate_out1.allocator()->allocate();
    _accum_cell_state2.configure(&_cell_state_out5, &_cell_state_out3, &_cell_state_out1, ConvertPolicy::SATURATE);
    _cell_state_out3.allocator()->allocate();
    _cell_state_out5.allocator()->allocate();

    // Perform clipping
    if(cell_threshold != 0.f)
    {
        _perform_cell_clipping = true;
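        // LU_BOUNDED_RELU computes min(a, max(b, x)), so the upper bound (cell_threshold) must
        // be passed first and the lower bound (-cell_threshold) second to clamp the cell state
        // to [-cell_threshold, cell_threshold].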
        _cell_clip.configure(&_cell_state_out1, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, cell_threshold, -cell_threshold));
    }

    TensorShape output1_shape = compute_transposed_shape(*recurrent_to_output_weights->info());
    TensorShape output2_shape = compute_transposed_shape(*cell_bias->info());
    TensorShape output3_shape{ 1, output_state->info()->dimension(1) };
    _output1.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _output2.allocator()->init(TensorInfo(output1_shape, 1, input->info()->data_type()));
    _output3.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _output6.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));

    // Configure block that calculates the output
    // output_gate = Activation(input * input_to_output_weights + output_state * recurrent_to_output_weights + cell_state * cell_to_output_weights + output_gate_bias)
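    // Same structure as the forget gate; note that the peephole term uses the *updated* cell
    // state (_cell_state_out1) computed above, not the previous one.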
    _memory_group.manage(&_output1);
    _fully_connected_output.configure(input, input_to_output_weights, output_gate_bias, &_output1, true, false);
    _memory_group.manage(&_output2);
    _transpose_output1.configure(recurrent_to_output_weights, &_output2);
    _memory_group.manage(&_output3);
    _gemm_output1.configure(output_state, &_output2, nullptr, &_output3, 1.f, 0.f);
    _output2.allocator()->allocate();
    _memory_group.manage(&_output6);
    _accum_output1.configure(&_output1, &_output3, &_output6, ConvertPolicy::SATURATE);
    _output3.allocator()->allocate();
    CLTensor *output_gate_out = &_output6;
    if(lstm_params.has_peephole_opt())
    {
        _output4.allocator()->init(TensorInfo(output2_shape, 1, input->info()->data_type()));
        _output5.allocator()->init(TensorInfo(output3_shape, 1, input->info()->data_type()));

        _memory_group.manage(&_output4);
        _transpose_output2.configure(lstm_params.cell_to_output_weights(), &_output4);
        _memory_group.manage(&_output5);
        _gemm_output2.configure(&_cell_state_out1, &_output4, nullptr, &_output5, 1.f, 0.f);
        _accum_output2.configure(&_output6, &_output5, &_output1, ConvertPolicy::SATURATE);
        _output6.allocator()->allocate();
        output_gate_out = &_output1;

        // Allocate intermediate buffers
        _output4.allocator()->allocate();
        _output5.allocator()->allocate();
    }
    else
    {
        _output1.allocator()->allocate();
    }
    _activation_output.configure(output_gate_out, output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
    output_gate_out->allocator()->allocate();

    _cell_state_activation.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));

    // Configure block that calculates the output state
    /** lstm_res = PixelwiseMul(output, Activation(cell_state))
     *
     *                -- Clip(lstm_res * projection_weights + projection_bias, projection_threshold) , if there is a projection
     *               /
     *  output_state =  --
     *               \
     *                -- lstm_res , otherwise
     */
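    // The cell-state activation (activation_info) is applied into a temporary, which is then
    // multiplied element-wise by the output gate already stored in 'output'; the result is
    // written to output_state and, if a projection is configured, projected and optionally clipped.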
    _memory_group.manage(&_cell_state_activation);
    _activation_output_state.configure(&_cell_state_out1, &_cell_state_activation, activation_info);
    _pixelwise_mul_output_state.configure(&_cell_state_activation, output, output_state, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
    _cell_state_activation.allocator()->allocate();

    if(lstm_params.has_projection())
    {
        _has_projection_weights = true;
        _output_projection1.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
        _memory_group.manage(&_output_projection1);
        _fully_connected_output_state.configure(output_state, lstm_params.projection_weights(), lstm_params.projection_bias(), &_output_projection1, true, false);
        // Perform clipping
        if(projection_threshold != 0.f)
        {
            _perform_projection_clipping = true;
            // LU_BOUNDED_RELU expects (upper, lower); see the cell-clipping note above
            _projection_clip.configure(&_output_projection1, output_state, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, projection_threshold, -projection_threshold));
        }

        // Allocate intermediate buffer
        _output_projection1.allocator()->allocate();
    }

    // Copy cell state and output
    _copy_cell_state.configure(&_cell_state_out1, cell_state);
    _cell_state_out1.allocator()->allocate();
    _copy_output.configure(output_state, output);

    // Vector for holding the tensors to store in scratch buffer
    std::vector<ICLTensor *> scratch_inputs;
    if(!lstm_params.has_cifg_opt())
    {
        // Without CIFG the scratch buffer also holds the input gate (4 blocks instead of 3)
        scratch_inputs.emplace_back(&_input_gate_out1);
    }
    scratch_inputs.emplace_back(&_cell_state_out1);
    scratch_inputs.emplace_back(forget_gate_out);
    scratch_inputs.emplace_back(output_gate_out);
    _concat_scratch_buffer.configure(scratch_inputs, scratch_buffer);
}
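
// A minimal usage sketch (illustrative only): the tensor names below are hypothetical, their
// allocation and filling are assumed to be handled by the caller, and a default-constructed
// LSTMParams is assumed to select the CIFG path (call params.set_cifg_params(...) to supply
// input-gate weights, set_peephole_params(...) / set_projection_params(...) for the other options):
//
//     CLLSTMLayer           lstm;
//     LSTMParams<ICLTensor> params; // CIFG, no peephole, no projection
//     lstm.configure(&input, &w_xf, &w_xc, &w_xo,          // input and input-to-gate weights
//                    &w_hf, &w_hc, &w_ho,                  // recurrent-to-gate weights
//                    &b_f, &b_c, &b_o,                     // gate biases
//                    &output_state, &cell_state, &scratch, &output,
//                    params, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH, 1.f, 1.f),
//                    0.f /* cell_threshold */, 0.f /* projection_threshold */);
//     lstm.run();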

Status CLLSTMLayer::validate(const ITensorInfo *input, const ITensorInfo *input_to_forget_weights, const ITensorInfo *input_to_cell_weights, const ITensorInfo *input_to_output_weights,
                             const ITensorInfo *recurrent_to_forget_weights, const ITensorInfo *recurrent_to_cell_weights, const ITensorInfo *recurrent_to_output_weights,
                             const ITensorInfo *forget_gate_bias, const ITensorInfo *cell_bias, const ITensorInfo *output_gate_bias,
                             const ITensorInfo *output_state, const ITensorInfo *cell_state, const ITensorInfo *scratch_buffer, const ITensorInfo *output,
                             const LSTMParams<ITensorInfo> &lstm_params, const ActivationLayerInfo &activation_info, float cell_threshold, float projection_threshold)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights,
                                        forget_gate_bias, cell_bias, output_gate_bias, output_state, cell_state);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_forget_weights, recurrent_to_cell_weights,
                                                       recurrent_to_output_weights, forget_gate_bias, cell_bias, output_gate_bias, output_state, cell_state);
    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(input_to_forget_weights->num_dimensions() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(input_to_cell_weights->num_dimensions() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(input_to_output_weights->num_dimensions() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(recurrent_to_forget_weights->num_dimensions() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(recurrent_to_cell_weights->num_dimensions() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(recurrent_to_output_weights->num_dimensions() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(forget_gate_bias->num_dimensions() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(cell_bias->num_dimensions() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(output_gate_bias->num_dimensions() != 1);
    ARM_COMPUTE_RETURN_ERROR_ON(output_state->num_dimensions() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(cell_state->num_dimensions() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(scratch_buffer->num_dimensions() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() != 2);
    ARM_COMPUTE_RETURN_ERROR_ON(cell_bias->dimension(0) * 4 != scratch_buffer->dimension(0) && cell_bias->dimension(0) * 3 != scratch_buffer->dimension(0));
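    // The scratch buffer concatenates the per-gate intermediates along the width: 4 * num_units
    // without CIFG (input, cell, forget and output blocks), 3 * num_units with CIFG.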

    if(lstm_params.has_peephole_opt())
    {
        ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lstm_params.cell_to_input_weights(), lstm_params.cell_to_output_weights(), lstm_params.cell_to_forget_weights());
        ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.cell_to_input_weights()->num_dimensions() != 1);
        ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.cell_to_forget_weights()->num_dimensions() != 1);
        ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.cell_to_output_weights()->num_dimensions() != 1);
    }

    TensorShape units_out_transposed_shape = compute_transposed_shape(*recurrent_to_output_weights);
    TensorShape gemmv_shape{ 1, output_state->dimension(1) };
    TensorShape num_units_transposed_shape = compute_transposed_shape(*forget_gate_bias);
    const TensorInfo units_out_transposed_info = TensorInfo(units_out_transposed_shape, 1, input->data_type());
    const TensorInfo gemmv_shape_info          = TensorInfo(gemmv_shape, 1, input->data_type());
    const TensorInfo num_units_transposed_info = TensorInfo(num_units_transposed_shape, 1, input->data_type());
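    // These TensorInfo objects stand in for the intermediate tensors created by configure(), so
    // the sub-functions can be validated without allocating anything; cell_state is reused below
    // as a generic surrogate for the num_units-sized intermediates.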

    // Validate forget gate
    ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, input_to_forget_weights, forget_gate_bias, cell_state, true, false));
    ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(output_state, &units_out_transposed_info, nullptr, cell_state, 1.f, 0.f, GEMMInfo()));
    ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAdditionKernel::validate(cell_state, cell_state, cell_state, ConvertPolicy::SATURATE));
    if(lstm_params.has_peephole_opt())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(cell_state, &num_units_transposed_info, nullptr, &gemmv_shape_info, 1.f, 0.f, GEMMInfo()));
        ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(cell_state, &gemmv_shape_info, cell_state, ConvertPolicy::SATURATE));
    }
    ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(cell_state, cell_state, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)));

    // Validate input gate
    if(!lstm_params.has_cifg_opt())
    {
        ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lstm_params.input_to_input_weights(), lstm_params.recurrent_to_input_weights(), lstm_params.cell_to_input_weights(), lstm_params.input_gate_bias());
        ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.input_to_input_weights()->num_dimensions() != 2);
        ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.recurrent_to_input_weights()->num_dimensions() != 2);
        ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.cell_to_input_weights()->num_dimensions() != 1);
        ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.input_gate_bias()->num_dimensions() != 1);
        ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, lstm_params.input_to_input_weights(), lstm_params.input_gate_bias(), cell_state, true, false));
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(cell_state, &num_units_transposed_info, nullptr, &gemmv_shape_info, 1.f, 0.f, GEMMInfo()));
        ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(cell_state, &gemmv_shape_info, cell_state, ConvertPolicy::SATURATE));
        ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(cell_state, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticSubtractionKernel::validate(cell_state, cell_state, cell_state, ConvertPolicy::SATURATE));
    }

    // Validate cell state
    ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, input_to_cell_weights, cell_bias, cell_state, true, false));
    ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(cell_state, nullptr, activation_info));
    ARM_COMPUTE_RETURN_ON_ERROR(CLPixelWiseMultiplicationKernel::validate(cell_state, cell_state, cell_state, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN));

    if(cell_threshold != 0.f)
    {
        // LU_BOUNDED_RELU takes (upper, lower), matching the configure() call above
        ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(cell_state, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, cell_threshold, -cell_threshold)));
    }

    ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, input_to_output_weights, output_gate_bias, cell_state, true, false));
    if(lstm_params.has_peephole_opt())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(cell_state, cell_state, cell_state, ConvertPolicy::SATURATE));
    }
    ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(cell_state, output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)));

    // Validate output state
    ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(cell_state, cell_state, activation_info));
    ARM_COMPUTE_RETURN_ON_ERROR(CLPixelWiseMultiplicationKernel::validate(cell_state, output, output_state, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN));
    if(lstm_params.has_projection())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(output_state, lstm_params.projection_weights(), lstm_params.projection_bias(), cell_state, true, false));
        if(projection_threshold != 0.f)
        {
            ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(cell_state, output_state, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, projection_threshold,
                                                                                                                        -projection_threshold)));
        }
    }

    std::vector<TensorInfo> inputs_vector_info;
    if(!lstm_params.has_cifg_opt())
    {
        // Without CIFG the scratch buffer holds a fourth (input gate) block
        inputs_vector_info.emplace_back(*cell_state);
    }
    inputs_vector_info.emplace_back(*cell_state);
    inputs_vector_info.emplace_back(*cell_state);
    inputs_vector_info.emplace_back(*cell_state);

    std::vector<ITensorInfo *> inputs_vector_info_raw;
    for(auto &input : inputs_vector_info)
    {
        inputs_vector_info_raw.emplace_back(&input);
    }

    ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenateLayer::validate(inputs_vector_info_raw, scratch_buffer));
    return Status{};
}

void CLLSTMLayer::run()
{
    _memory_group.acquire();

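    // Work is issued in dependency order: forget gate, input gate, cell state, output gate,
    // output state, the optional projection, then the state copies and the scratch-buffer
    // concatenation. Bare CL kernels are enqueued directly; composite functions go through run().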
    _fully_connected_forget_gate.run();
    CLScheduler::get().enqueue(_transpose_forget_gate1);
    _gemm_forget_gate1.run();
    CLScheduler::get().enqueue(_accum_forget_gate1);

    if(_run_peephole_opt)
    {
        CLScheduler::get().enqueue(_transpose_forget_gate2);
        _gemm_forget_gate2.run();
        _accum_forget_gate2.run();
    }
    CLScheduler::get().enqueue(_activation_forget_gate);

    if(_run_cifg_opt)
    {
        _ones.map(true);
        // Fill the "ones" tensor with the value 1 in the tensor's data type: filling raw bytes
        // with 1 would not produce 1.0 for floating-point types.
        if(_ones.info()->data_type() == DataType::F16)
        {
            std::fill_n(reinterpret_cast<half *>(_ones.buffer()), _ones.info()->total_size() / _ones.info()->element_size(), half(1.f));
        }
        else
        {
            std::fill_n(reinterpret_cast<float *>(_ones.buffer()), _ones.info()->total_size() / _ones.info()->element_size(), 1.f);
        }
        _ones.unmap();
        CLScheduler::get().enqueue(_subtract_input_gate);
    }
    else
    {
        _fully_connected_input_gate.run();
        CLScheduler::get().enqueue(_transpose_input_gate1);
        _gemm_input_gate1.run();
        CLScheduler::get().enqueue(_transpose_input_gate2);
        _gemm_input_gate2.run();
        CLScheduler::get().enqueue(_accum_input_gate1);
        _accum_input_gate2.run();
        CLScheduler::get().enqueue(_activation_input_gate);
    }

    _fully_connected_cell_state.run();
    CLScheduler::get().enqueue(_transpose_cell_state1);
    _gemm_cell_state1.run();
    CLScheduler::get().enqueue(_accum_cell_state1);
    CLScheduler::get().enqueue(_activation_cell_state);
    CLScheduler::get().enqueue(_pixelwise_mul_cell_state1);
    CLScheduler::get().enqueue(_pixelwise_mul_cell_state2);
    CLScheduler::get().enqueue(_accum_cell_state2);

    if(_perform_cell_clipping)
    {
        CLScheduler::get().enqueue(_cell_clip);
    }

    _fully_connected_output.run();
    CLScheduler::get().enqueue(_transpose_output1);
    _gemm_output1.run();
    CLScheduler::get().enqueue(_accum_output1);

    if(_run_peephole_opt)
    {
        CLScheduler::get().enqueue(_transpose_output2);
        _gemm_output2.run();
        _accum_output2.run();
    }
    CLScheduler::get().enqueue(_activation_output);

    CLScheduler::get().enqueue(_activation_output_state);
    CLScheduler::get().enqueue(_pixelwise_mul_output_state);

    if(_has_projection_weights)
    {
        _fully_connected_output_state.run();
        if(_perform_projection_clipping)
        {
            CLScheduler::get().enqueue(_projection_clip);
        }
    }

    CLScheduler::get().enqueue(_copy_cell_state);
    CLScheduler::get().enqueue(_copy_output);

    _concat_scratch_buffer.run();

    _memory_group.release();
}