/*
 * Copyright (c) 2018-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NELSTMLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/common/LSTMParams.h"

#include <cmath>
#include <memory>
#include <tuple>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

NELSTMLayer::NELSTMLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _fully_connected_input_gate(), _gemm_input_gate(), _transpose_input_gate(), _accum_input_gate1(), _accum_input_gate2(), _subtract_input_gate(),
      _pixelwise_mul_input_gate(), _activation_input_gate(), _fully_connected_forget_gate(), _gemm_forget_gate(), _transpose_forget_gate(), _accum_forget_gate1(), _accum_forget_gate2(),
      _pixelwise_mul_forget_gate(), _activation_forget_gate(), _fully_connected_cell_state(), _gemm_cell_state1(), _gemm_cell_state2(), _transpose_cell_state(), _accum_cell_state1(), _accum_cell_state2(),
      _pixelwise_mul_cell_state1(), _activation_cell_state(), _cell_clip(), _pixelwise_mul_cell_state2(), _fully_connected_output(), _gemm_output(), _pixelwise_mul_output_state1(), _transpose_output(),
      _accum_output1(), _accum_output2(), _activation_output(), _activation_output_state(), _pixelwise_mul_output_state2(), _fully_connected_output_state(), _gemm_output_state(), _accum_output_state(),
      _projection_clip(), _copy_cell_state(), _copy_output(), _concat_scratch_buffer(), _input_gate_out1(), _input_gate_out2(), _input_gate_out3(), _input_gate_out4(), _input_gate_out5(),
      _forget_gate_out1(), _forget_gate_out2(), _forget_gate_out3(), _forget_gate_out4(), _forget_gate_out5(), _cell_state_out1(), _cell_state_out2(), _cell_state_out3(), _cell_state_out4(),
      _cell_state_out5(), _output1(), _output2(), _output3(), _output4(), _output5(), _cell_state_activation(), _output_state1(), _ones(), _run_peephole_opt(false), _run_cifg_opt(false),
      _perform_cell_clipping(false), _has_projection_weights(false), _perform_projection_clipping(false)
{
}

void NELSTMLayer::configure(const ITensor *input,
                            const ITensor *input_to_forget_weights, const ITensor *input_to_cell_weights, const ITensor *input_to_output_weights,
                            const ITensor *recurrent_to_forget_weights, const ITensor *recurrent_to_cell_weights, const ITensor *recurrent_to_output_weights,
                            const ITensor *forget_gate_bias, const ITensor *cell_bias, const ITensor *output_gate_bias,
                            const ITensor *output_state_in, const ITensor *cell_state_in,
                            ITensor *scratch_buffer, ITensor *output_state_out, ITensor *cell_state_out, ITensor *output,
                            const LSTMParams<ITensor> &lstm_params, const ActivationLayerInfo &activation_info, float cell_threshold, float projection_threshold)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input,
                                 input_to_forget_weights, input_to_cell_weights, input_to_output_weights,
                                 recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights,
                                 forget_gate_bias, cell_bias, output_gate_bias,
                                 output_state_in, cell_state_in,
                                 scratch_buffer, output_state_out, cell_state_out, output);

    // Gather the optional LSTM parameters (peephole, projection, CIFG) as ITensorInfo for the validate() call below
    LSTMParams<ITensorInfo> lstm_params_info;
    if(lstm_params.has_peephole_opt())
    {
        lstm_params_info.set_peephole_params(lstm_params.cell_to_forget_weights()->info(), lstm_params.cell_to_output_weights()->info());
    }
    if(lstm_params.has_projection())
    {
        lstm_params_info.set_projection_params(lstm_params.projection_weights()->info(),
                                               lstm_params.projection_bias() != nullptr ? lstm_params.projection_bias()->info() : nullptr);
    }
    if(!lstm_params.has_cifg_opt())
    {
        const ITensorInfo *cell_to_input_weights_info = (lstm_params.has_peephole_opt()) ? lstm_params.cell_to_input_weights()->info() : nullptr;
        lstm_params_info.set_cifg_params(lstm_params.input_to_input_weights()->info(), lstm_params.recurrent_to_input_weights()->info(),
                                         cell_to_input_weights_info, lstm_params.input_gate_bias()->info());
    }

    // Validate
    ARM_COMPUTE_ERROR_THROW_ON(NELSTMLayer::validate(input->info(), input_to_forget_weights->info(),
                                                     input_to_cell_weights->info(), input_to_output_weights->info(),
                                                     recurrent_to_forget_weights->info(), recurrent_to_cell_weights->info(), recurrent_to_output_weights->info(),
                                                     forget_gate_bias->info(), cell_bias->info(), output_gate_bias->info(),
                                                     output_state_in->info(), cell_state_in->info(),
                                                     scratch_buffer->info(), output_state_out->info(), cell_state_out->info(), output->info(),
                                                     lstm_params_info, activation_info, cell_threshold, projection_threshold));

    const TensorShape cell_state_shape = cell_state_in->info()->tensor_shape();

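    // All gate intermediates are sized like the cell state, i.e. (num_units, num_batches).
    // Every recurrent weight matrix shares the same shape, which is why recurrent_to_output_weights
    // is used below to compute the transposed shape for each gate's recurrent weights.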
    // Configure block that calculates the forget gate
    // forget_gate = Activation(input * input_to_forget_weights + output_state_in * recurrent_to_forget_weights + PixelWiseMul(cell_state, cell_to_forget_weights) + forget_gate_bias)
    TensorShape forget_gate1_shape = compute_transposed_shape(*recurrent_to_output_weights->info());
    _forget_gate_out1.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _forget_gate_out2.allocator()->init(TensorInfo(forget_gate1_shape, 1, input->info()->data_type()));
    _forget_gate_out3.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _forget_gate_out5.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));

    _memory_group.manage(&_forget_gate_out1);
    _fully_connected_forget_gate.configure(input, input_to_forget_weights, forget_gate_bias, &_forget_gate_out1);
    _memory_group.manage(&_forget_gate_out2);
    _transpose_forget_gate.configure(recurrent_to_forget_weights, &_forget_gate_out2);
    _memory_group.manage(&_forget_gate_out3);
    _gemm_forget_gate.configure(output_state_in, &_forget_gate_out2, nullptr, &_forget_gate_out3, 1.f, 0.f);
    _forget_gate_out2.allocator()->allocate();
    _memory_group.manage(&_forget_gate_out5);
    _accum_forget_gate1.configure(&_forget_gate_out1, &_forget_gate_out3, &_forget_gate_out5, ConvertPolicy::SATURATE);
    Tensor *forget_gate_out = &_forget_gate_out5;
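    // forget_gate_out tracks whichever buffer holds the pre-activation forget gate:
    // the peephole path below accumulates into _forget_gate_out3 instead of _forget_gate_out5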

    if(lstm_params.has_peephole_opt())
    {
        _forget_gate_out4.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));

        _run_peephole_opt = true;
        _memory_group.manage(&_forget_gate_out4);
        _pixelwise_mul_forget_gate.configure(cell_state_in, lstm_params.cell_to_forget_weights(), &_forget_gate_out4, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
        _accum_forget_gate2.configure(&_forget_gate_out5, &_forget_gate_out4, &_forget_gate_out3, ConvertPolicy::SATURATE);
        _forget_gate_out4.allocator()->allocate();
        _forget_gate_out5.allocator()->allocate();
        forget_gate_out = &_forget_gate_out3;
    }
    else
    {
        _forget_gate_out3.allocator()->allocate();
    }
    _activation_forget_gate.configure(forget_gate_out, &_forget_gate_out1, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
    forget_gate_out->allocator()->allocate();

    // Configure block that calculates the input gate
    // input_gate = Activation(input * input_to_input_weights + output_state * recurrent_to_input_weights + PixelWiseMul(cell_state, cell_to_input_weights) + input_gate_bias), without CIFG
    // input_gate = 1 - forget_gate, with CIFG
    _input_gate_out1.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    if(lstm_params.has_cifg_opt())
    {
        _memory_group.manage(&_input_gate_out1);
        _ones.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
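        // _ones is filled with the constant 1 in run(), so the subtraction below yields input_gate = 1 - forget_gate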
        _subtract_input_gate.configure(&_ones, &_forget_gate_out1, &_input_gate_out1, ConvertPolicy::SATURATE);
        _ones.allocator()->allocate();
        _run_cifg_opt = true;
    }
    else
    {
        TensorShape input_gate_shape = compute_transposed_shape(*recurrent_to_output_weights->info());

        _input_gate_out2.allocator()->init(TensorInfo(input_gate_shape, 1, input->info()->data_type()));
        _input_gate_out3.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
        _input_gate_out4.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
        _input_gate_out5.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));

        _memory_group.manage(&_input_gate_out1);
        _fully_connected_input_gate.configure(input, lstm_params.input_to_input_weights(), lstm_params.input_gate_bias(), &_input_gate_out1);
        _memory_group.manage(&_input_gate_out2);
        _transpose_input_gate.configure(lstm_params.recurrent_to_input_weights(), &_input_gate_out2);
        _memory_group.manage(&_input_gate_out3);
        _gemm_input_gate.configure(output_state_in, &_input_gate_out2, nullptr, &_input_gate_out3, 1.f, 0.f);
        _input_gate_out2.allocator()->allocate();
        _memory_group.manage(&_input_gate_out4);
        _accum_input_gate1.configure(&_input_gate_out1, &_input_gate_out3, &_input_gate_out4, ConvertPolicy::SATURATE);
        if(_run_peephole_opt)
        {
            _memory_group.manage(&_input_gate_out5);
            _pixelwise_mul_input_gate.configure(cell_state_in, lstm_params.cell_to_input_weights(), &_input_gate_out5, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
            _accum_input_gate2.configure(&_input_gate_out4, &_input_gate_out5, &_input_gate_out1, ConvertPolicy::SATURATE);
            _input_gate_out5.allocator()->allocate();
        }
        _input_gate_out3.allocator()->allocate();
        _input_gate_out4.allocator()->allocate();
        _activation_input_gate.configure(&_input_gate_out1, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
    }

    // Configure block that calculates the cell state
    // cell_state = Clip((PixelwiseMul(input_gate, Activation(input * input_to_cell_weights + output_state_in * recurrent_to_cell_weights + cell_bias)) + PixelwiseMul(forget_gate, cell_state)), cell_threshold)
    TensorShape cell_state1_shape = compute_transposed_shape(*recurrent_to_output_weights->info());
    _cell_state_out1.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _cell_state_out2.allocator()->init(TensorInfo(cell_state1_shape, 1, input->info()->data_type()));
    _cell_state_out3.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _cell_state_out4.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _cell_state_out5.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));

    _memory_group.manage(&_cell_state_out1);
    _fully_connected_cell_state.configure(input, input_to_cell_weights, cell_bias, &_cell_state_out1);
    _memory_group.manage(&_cell_state_out2);
    _transpose_cell_state.configure(recurrent_to_cell_weights, &_cell_state_out2);
    _memory_group.manage(&_cell_state_out3);
    _gemm_cell_state1.configure(output_state_in, &_cell_state_out2, nullptr, &_cell_state_out3, 1.f, 0.f);
    _cell_state_out2.allocator()->allocate();
    _memory_group.manage(&_cell_state_out4);
    _accum_cell_state1.configure(&_cell_state_out1, &_cell_state_out3, &_cell_state_out4, ConvertPolicy::SATURATE);
    _activation_cell_state.configure(&_cell_state_out4, nullptr, activation_info);
    _memory_group.manage(&_cell_state_out5);
    _pixelwise_mul_cell_state1.configure(&_cell_state_out4, &_input_gate_out1, &_cell_state_out5, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
    _input_gate_out1.allocator()->allocate();
    _cell_state_out4.allocator()->allocate();
    _pixelwise_mul_cell_state2.configure(&_forget_gate_out1, cell_state_in, &_cell_state_out3, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
    _forget_gate_out1.allocator()->allocate();
    _accum_cell_state2.configure(&_cell_state_out5, &_cell_state_out3, &_cell_state_out1, ConvertPolicy::SATURATE);
    _cell_state_out3.allocator()->allocate();
    _cell_state_out5.allocator()->allocate();
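    // _cell_state_out1 now receives the updated cell state:
    // input_gate * candidate (_cell_state_out5) + forget_gate * previous cell state (_cell_state_out3)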
    // Clamp the cell state to [-cell_threshold, cell_threshold] when a non-zero threshold is given
    if(cell_threshold != 0.f)
    {
        _perform_cell_clipping = true;
        _cell_clip.configure(&_cell_state_out1, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -cell_threshold, cell_threshold));
    }

    // Configure block that calculates the output
    // output_state_out = Activation(input * input_to_output_weights + output_state_in * recurrent_to_output_weights + PixelWiseMul(cell_state, cell_to_output_weights) + output_gate_bias)
    TensorShape output1_shape = compute_transposed_shape(*recurrent_to_output_weights->info());
    _output1.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _output2.allocator()->init(TensorInfo(output1_shape, 1, input->info()->data_type()));
    _output3.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _output5.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));

    _memory_group.manage(&_output1);
    _fully_connected_output.configure(input, input_to_output_weights, output_gate_bias, &_output1);
    _memory_group.manage(&_output2);
    _transpose_output.configure(recurrent_to_output_weights, &_output2);
    _memory_group.manage(&_output3);
    _gemm_output.configure(output_state_in, &_output2, nullptr, &_output3, 1.f, 0.f);
    _output2.allocator()->allocate();
    _memory_group.manage(&_output5);
    _accum_output1.configure(&_output1, &_output3, &_output5, ConvertPolicy::SATURATE);
    _output3.allocator()->allocate();
    Tensor *output_gate_out = &_output5;
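    // As with the forget gate, output_gate_out points at whichever buffer ends up holding
    // the pre-activation output gate once the optional peephole term has been added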
    if(lstm_params.has_peephole_opt())
    {
        _output4.allocator()->init(TensorInfo(_cell_state_out1.info()->tensor_shape(), 1, input->info()->data_type()));

        _memory_group.manage(&_output4);
        _pixelwise_mul_output_state1.configure(&_cell_state_out1, lstm_params.cell_to_output_weights(), &_output4, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
        _accum_output2.configure(&_output5, &_output4, &_output1, ConvertPolicy::SATURATE);
        _output5.allocator()->allocate();
        output_gate_out = &_output1;

        // Allocate intermediate buffers
        _output4.allocator()->allocate();
    }
    else
    {
        _output1.allocator()->allocate();
    }
    _activation_output.configure(output_gate_out, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));

    // Configure block that calculates the output state
    /** lstm_res = PixelwiseMul(output, Activation(cell_state))
     *
     *                -- Clip(lstm_res * projection_weights + projection_bias, projection_threshold) , if there is a projection
     *               /
     *  output_state =  --
     *               \
     *                -- lstm_res , otherwise
     */
    ITensor *output_state_out_tmp = lstm_params.has_projection() ? &_output_state1 : output_state_out;
    _cell_state_activation.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _output_state1.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));

    _memory_group.manage(&_cell_state_activation);
    _activation_output_state.configure(&_cell_state_out1, &_cell_state_activation, activation_info);
    _pixelwise_mul_output_state2.configure(&_cell_state_activation, output_gate_out, output_state_out_tmp, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
    _cell_state_activation.allocator()->allocate();
    output_gate_out->allocator()->allocate();

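    // With a projection, the LSTM result lands in the temporary _output_state1 and the fully
    // connected projection below produces output_state_out; otherwise the pixelwise
    // multiplication above has already written into output_state_out directly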
    if(lstm_params.has_projection())
    {
        _has_projection_weights = true;
        _fully_connected_output_state.configure(output_state_out_tmp, lstm_params.projection_weights(), lstm_params.projection_bias(), output_state_out);
        _output_state1.allocator()->allocate();
        // Clamp the projected output to [-projection_threshold, projection_threshold] when a non-zero threshold is given
        if(projection_threshold != 0.f)
        {
            _perform_projection_clipping = true;
            _projection_clip.configure(output_state_out, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -projection_threshold, projection_threshold));
        }
    }

    // Copy cell state and output
    _copy_cell_state.configure(&_cell_state_out1, cell_state_out);
    _cell_state_out1.allocator()->allocate();
    _copy_output.configure(output_state_out, output);

    // Gather the tensors to concatenate into the scratch buffer; with CIFG the input gate
    // is omitted, so the buffer holds 3 gates instead of 4 (matching the size check in validate())
    std::vector<ITensor *> scratch_inputs;
    if(!lstm_params.has_cifg_opt())
    {
        scratch_inputs.emplace_back(&_input_gate_out1);
    }
    scratch_inputs.emplace_back(&_cell_state_out1);
    scratch_inputs.emplace_back(forget_gate_out);
    scratch_inputs.emplace_back(output_gate_out);
    _concat_scratch_buffer.configure(scratch_inputs, scratch_buffer);
}

Status NELSTMLayer::validate(const ITensorInfo *input,
                             const ITensorInfo *input_to_forget_weights, const ITensorInfo *input_to_cell_weights, const ITensorInfo *input_to_output_weights,
                             const ITensorInfo *recurrent_to_forget_weights, const ITensorInfo *recurrent_to_cell_weights, const ITensorInfo *recurrent_to_output_weights,
                             const ITensorInfo *forget_gate_bias, const ITensorInfo *cell_bias, const ITensorInfo *output_gate_bias,
                             const ITensorInfo *output_state_in, const ITensorInfo *cell_state_in,
                             const ITensorInfo *scratch_buffer, const ITensorInfo *output_state_out, const ITensorInfo *cell_state_out, const ITensorInfo *output,
                             const LSTMParams<ITensorInfo> &lstm_params, const ActivationLayerInfo &activation_info, float cell_threshold, float projection_threshold)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input,
                                        input_to_forget_weights, input_to_cell_weights, input_to_output_weights,
                                        recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights,
                                        forget_gate_bias, cell_bias, output_gate_bias,
                                        output_state_in, cell_state_in,
                                        scratch_buffer, output_state_out, cell_state_out, output);

    // Check data types
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input,
                                                       input_to_forget_weights, input_to_cell_weights, input_to_output_weights,
                                                       recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights,
                                                       forget_gate_bias, cell_bias, output_gate_bias,
                                                       output_state_in, cell_state_in,
                                                       scratch_buffer, output_state_out, cell_state_out, output);

    // Check dimensions
    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(input_to_forget_weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(input_to_cell_weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(input_to_output_weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(recurrent_to_forget_weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(recurrent_to_cell_weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(recurrent_to_output_weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(forget_gate_bias->num_dimensions() > 1);
    ARM_COMPUTE_RETURN_ERROR_ON(cell_bias->num_dimensions() > 1);
    ARM_COMPUTE_RETURN_ERROR_ON(output_gate_bias->num_dimensions() > 1);
    ARM_COMPUTE_RETURN_ERROR_ON(output_state_in->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(cell_state_in->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(scratch_buffer->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(output_state_out->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(cell_state_out->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 2);
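    // The scratch buffer concatenates four num_units-wide gate buffers, or three when CIFG drops the input gate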
    ARM_COMPUTE_RETURN_ERROR_ON(cell_bias->dimension(0) * 4 != scratch_buffer->dimension(0)
                                && cell_bias->dimension(0) * 3 != scratch_buffer->dimension(0));

    const unsigned int num_batches = input->dimension(1);
    const unsigned int num_cells   = input_to_output_weights->dimension(1);

    // Check peephole optimization
    if(lstm_params.has_peephole_opt())
    {
        ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lstm_params.cell_to_output_weights(), lstm_params.cell_to_forget_weights());
        ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.cell_to_forget_weights()->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.cell_to_output_weights()->num_dimensions() > 1);
    }

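    // Build TensorInfo stand-ins for the intermediate results so each internal function/kernel can be validated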
    TensorShape units_out_transposed_shape = compute_transposed_shape(*recurrent_to_output_weights);
    TensorShape num_units_transposed_shape = compute_transposed_shape(*forget_gate_bias);
    const TensorInfo units_out_transposed_info = TensorInfo(units_out_transposed_shape, 1, input->data_type());
    const TensorInfo num_units_transposed_info = TensorInfo(num_units_transposed_shape, 1, input->data_type());

    TensorInfo input_gate      = TensorInfo(TensorShape(num_cells, num_batches), 1, input->data_type());
    TensorInfo forget_gate     = TensorInfo(TensorShape(num_cells, num_batches), 1, input->data_type());
    TensorInfo output_gate_tmp = TensorInfo(TensorShape(num_cells, num_batches), 1, input->data_type());
    TensorInfo cell_state_tmp  = TensorInfo(TensorShape(num_cells, num_batches), 1, input->data_type());

    // Validate forget gate
    ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayer::validate(input, input_to_forget_weights, forget_gate_bias, &forget_gate));
    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMM::validate(output_state_in, &units_out_transposed_info, nullptr, &forget_gate, 1.f, 0.f, GEMMInfo()));
    ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAdditionKernel::validate(&forget_gate, &forget_gate, &forget_gate, ConvertPolicy::SATURATE));
    if(lstm_params.has_peephole_opt())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEPixelWiseMultiplicationKernel::validate(cell_state_in, lstm_params.cell_to_forget_weights(), &forget_gate, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO));
        ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAddition::validate(&forget_gate, &forget_gate, &forget_gate, ConvertPolicy::SATURATE));
    }
    ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayerKernel::validate(&forget_gate, &forget_gate, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)));

    // Validate input gate
    if(!lstm_params.has_cifg_opt())
    {
        ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lstm_params.input_to_input_weights(),
                                            lstm_params.recurrent_to_input_weights(),
                                            lstm_params.input_gate_bias());
        ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.input_to_input_weights()->num_dimensions() > 2);
        ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.recurrent_to_input_weights()->num_dimensions() > 2);
        ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.input_gate_bias()->num_dimensions() > 1);

        ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayer::validate(input, lstm_params.input_to_input_weights(), lstm_params.input_gate_bias(), &input_gate));
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMM::validate(output_state_in, &units_out_transposed_info, nullptr, &input_gate, 1.f, 0.f, GEMMInfo()));
        ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAddition::validate(&input_gate, &input_gate, &input_gate, ConvertPolicy::SATURATE));
        if(lstm_params.has_peephole_opt())
        {
            ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lstm_params.cell_to_input_weights());
            ARM_COMPUTE_RETURN_ERROR_ON(lstm_params.cell_to_input_weights()->num_dimensions() > 1);
            ARM_COMPUTE_RETURN_ON_ERROR(NEPixelWiseMultiplicationKernel::validate(cell_state_in, lstm_params.cell_to_input_weights(), &input_gate, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO));
            ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAddition::validate(&input_gate, &input_gate, &input_gate, ConvertPolicy::SATURATE));
        }
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayerKernel::validate(&input_gate, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticSubtractionKernel::validate(&forget_gate, &forget_gate, &forget_gate, ConvertPolicy::SATURATE));
    }

    // Validate cell state
    ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayer::validate(input, input_to_cell_weights, cell_bias, &cell_state_tmp));
    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMM::validate(output_state_in, &units_out_transposed_info, nullptr, &cell_state_tmp, 1.f, 0.f, GEMMInfo()));
    ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAddition::validate(&cell_state_tmp, &cell_state_tmp, &cell_state_tmp, ConvertPolicy::SATURATE));
    ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayerKernel::validate(&cell_state_tmp, nullptr, activation_info));
    ARM_COMPUTE_RETURN_ON_ERROR(NEPixelWiseMultiplicationKernel::validate(&cell_state_tmp, &input_gate, &cell_state_tmp, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO));
    ARM_COMPUTE_RETURN_ON_ERROR(NEPixelWiseMultiplicationKernel::validate(&cell_state_tmp, &forget_gate, &cell_state_tmp, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO));
    ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAddition::validate(&cell_state_tmp, &cell_state_tmp, &cell_state_tmp, ConvertPolicy::SATURATE));
    if(cell_threshold != 0.f)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayerKernel::validate(&cell_state_tmp, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -cell_threshold,
                                                                                                cell_threshold)));
    }

    // Validate output gate tmp
    ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayer::validate(input, input_to_output_weights, output_gate_bias, &output_gate_tmp));
    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMM::validate(output_state_in, &units_out_transposed_info, nullptr, &output_gate_tmp, 1.f, 0.f, GEMMInfo()));
    ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAddition::validate(&output_gate_tmp, &output_gate_tmp, &output_gate_tmp, ConvertPolicy::SATURATE));
    if(lstm_params.has_peephole_opt())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEPixelWiseMultiplicationKernel::validate(&cell_state_tmp, lstm_params.cell_to_output_weights(), &output_gate_tmp, 1, ConvertPolicy::SATURATE,
                                                                              RoundingPolicy::TO_ZERO));
        ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAddition::validate(&output_gate_tmp, &output_gate_tmp, &output_gate_tmp, ConvertPolicy::SATURATE));
    }
    ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayerKernel::validate(&output_gate_tmp, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)));

    // Validate output state
    ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayerKernel::validate(&cell_state_tmp, &cell_state_tmp, activation_info));
    ARM_COMPUTE_RETURN_ON_ERROR(NEPixelWiseMultiplicationKernel::validate(&cell_state_tmp, &output_gate_tmp, &output_gate_tmp, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO));
    if(lstm_params.has_projection())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayer::validate(&output_gate_tmp, lstm_params.projection_weights(), lstm_params.projection_bias(), output_state_out));
        if(projection_threshold != 0.f)
        {
            ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayerKernel::validate(output_state_out, output_state_out,
                                                                          ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -projection_threshold, projection_threshold)));
        }
    }

    // Validate copy kernel
    ARM_COMPUTE_RETURN_ON_ERROR(NECopyKernel::validate(&cell_state_tmp, cell_state_out));
    ARM_COMPUTE_RETURN_ON_ERROR(NECopyKernel::validate(output_state_out, output));

    // Validate scratch concatenation
    std::vector<ITensorInfo *> inputs_vector_info_raw;
    if(!lstm_params.has_cifg_opt())
    {
        inputs_vector_info_raw.push_back(&input_gate);
    }
    inputs_vector_info_raw.push_back(&cell_state_tmp);
    inputs_vector_info_raw.push_back(&forget_gate);
    inputs_vector_info_raw.push_back(&output_gate_tmp);

    ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayer::validate(inputs_vector_info_raw, scratch_buffer));
    return Status{};
}

void NELSTMLayer::run()
{
    _memory_group.acquire();
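    // acquire()/release() bracket the run so that memory-managed intermediate buffers are
    // only backed by memory while the layer is executing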

    _fully_connected_forget_gate.run();
    NEScheduler::get().schedule(&_transpose_forget_gate, Window::DimY);
    _gemm_forget_gate.run();
    NEScheduler::get().schedule(&_accum_forget_gate1, Window::DimY);

    if(_run_peephole_opt)
    {
        NEScheduler::get().schedule(&_pixelwise_mul_forget_gate, Window::DimY);
        _accum_forget_gate2.run();
    }
    NEScheduler::get().schedule(&_activation_forget_gate, Window::DimY);

    if(_run_cifg_opt)
    {
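        // Fill _ones with the constant 1 so that the subtraction yields input_gate = 1 - forget_gate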
        if(_ones.info()->data_type() == DataType::F16)
        {
            std::fill_n(reinterpret_cast<half *>(_ones.buffer()), _ones.info()->total_size() / _ones.info()->element_size(), 1);
        }
        else
        {
            std::fill_n(reinterpret_cast<float *>(_ones.buffer()), _ones.info()->total_size() / _ones.info()->element_size(), 1);
        }
        NEScheduler::get().schedule(&_subtract_input_gate, Window::DimY);
    }
    else
    {
        _fully_connected_input_gate.run();
        NEScheduler::get().schedule(&_transpose_input_gate, Window::DimY);
        _gemm_input_gate.run();
        NEScheduler::get().schedule(&_accum_input_gate1, Window::DimY);
        if(_run_peephole_opt)
        {
            NEScheduler::get().schedule(&_pixelwise_mul_input_gate, Window::DimY);
            _accum_input_gate2.run();
        }
        NEScheduler::get().schedule(&_activation_input_gate, Window::DimY);
    }

    _fully_connected_cell_state.run();
    NEScheduler::get().schedule(&_transpose_cell_state, Window::DimY);
    _gemm_cell_state1.run();
    NEScheduler::get().schedule(&_accum_cell_state1, Window::DimY);
    NEScheduler::get().schedule(&_activation_cell_state, Window::DimY);
    NEScheduler::get().schedule(&_pixelwise_mul_cell_state1, Window::DimY);
    NEScheduler::get().schedule(&_pixelwise_mul_cell_state2, Window::DimY);
    NEScheduler::get().schedule(&_accum_cell_state2, Window::DimY);

    if(_perform_cell_clipping)
    {
        NEScheduler::get().schedule(&_cell_clip, Window::DimY);
    }

    _fully_connected_output.run();
    NEScheduler::get().schedule(&_transpose_output, Window::DimY);
    _gemm_output.run();
    NEScheduler::get().schedule(&_accum_output1, Window::DimY);

    if(_run_peephole_opt)
    {
        NEScheduler::get().schedule(&_pixelwise_mul_output_state1, Window::DimY);
        _accum_output2.run();
    }
    NEScheduler::get().schedule(&_activation_output, Window::DimY);

    NEScheduler::get().schedule(&_activation_output_state, Window::DimY);
    NEScheduler::get().schedule(&_pixelwise_mul_output_state2, Window::DimY);

    if(_has_projection_weights)
    {
        _fully_connected_output_state.run();
        if(_perform_projection_clipping)
        {
            NEScheduler::get().schedule(&_projection_clip, Window::DimY);
        }
    }

    NEScheduler::get().schedule(&_copy_cell_state, Window::DimY);
    NEScheduler::get().schedule(&_copy_output, Window::DimY);

    _concat_scratch_buffer.run();

    _memory_group.release();
}