/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_LSTM_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_LSTM_LAYER_FIXTURE

#include "tests/Globals.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ArithmeticAddition.h"
#include "tests/validation/reference/ArithmeticSubtraction.h"
#include "tests/validation/reference/FullyConnectedLayer.h"
#include "tests/validation/reference/GEMM.h"
#include "tests/validation/reference/PixelWiseMultiplication.h"
#include "tests/validation/reference/Transpose.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
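/** Validation fixture for the LSTM layer.
 *
 * Runs the LSTM function under test and a reference implementation on identically
 * filled tensors so that the two results can be compared by the test suite.
 */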
template <typename TensorType, typename AccessorType, typename FunctionType, typename FunctionParams, typename T>
class LSTMLayerValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape input_weights_shape, TensorShape recurrent_weights_shape, TensorShape cell_bias_shape, TensorShape output_cell_shape, TensorShape output_shape,
               TensorShape scratch_shape, ActivationLayerInfo info, float cell_threshold, float projection_threshold, DataType data_type, bool projection_opt, bool peephole_opt)
    {
        _target    = compute_target(input_shape, input_weights_shape, recurrent_weights_shape, cell_bias_shape, output_cell_shape, output_shape, scratch_shape, info, cell_threshold, projection_threshold,
                                    data_type, projection_opt, peephole_opt);
        _reference = compute_reference(input_shape, input_weights_shape, recurrent_weights_shape, cell_bias_shape, output_cell_shape, output_shape, scratch_shape, info, cell_threshold, projection_threshold,
                                       data_type, projection_opt, peephole_opt);
    }

protected:
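    // Fill a tensor with uniform random values in [-1, 1], using i as the seed offset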
    template <typename U>
    void fill(U &&tensor, int i)
    {
        std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
        library->fill(tensor, distribution, i);
    }
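    // Fill a tensor with the constant value num (a degenerate uniform distribution over [num, num])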
    template <typename U>
    void fill_custom_val(U &&tensor, float num, int i)
    {
        std::uniform_real_distribution<> distribution(num, num);
        library->fill(tensor, distribution, i);
    }
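    // Configure, allocate, fill and run the LSTM function under test; returns its output tensor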
    TensorType compute_target(const TensorShape &input_shape, const TensorShape &input_weights_shape, const TensorShape &recurrent_weights_shape, const TensorShape &cell_bias_shape,
                              const TensorShape &output_cell_shape, const TensorShape &output_shape, const TensorShape &scratch_shape, ActivationLayerInfo info, float cell_threshold,
                              float projection_threshold, DataType data_type, bool projection_opt, bool peephole_opt)
    {
        // Create projection bias shape
        TensorShape projection_bias_shape{};
        projection_bias_shape.set(0, output_shape.x());

        // Create tensors
        TensorType input                 = create_tensor<TensorType>(input_shape, data_type);
        TensorType input_to_forget_w     = create_tensor<TensorType>(input_weights_shape, data_type);
        TensorType input_to_cell_w       = create_tensor<TensorType>(input_weights_shape, data_type);
        TensorType input_to_output_w     = create_tensor<TensorType>(input_weights_shape, data_type);
        TensorType recurrent_to_forget_w = create_tensor<TensorType>(recurrent_weights_shape, data_type);
        TensorType recurrent_to_cell_w   = create_tensor<TensorType>(recurrent_weights_shape, data_type);
        TensorType recurrent_to_output_w = create_tensor<TensorType>(recurrent_weights_shape, data_type);
        TensorType forget_gate_bias      = create_tensor<TensorType>(cell_bias_shape, data_type);
        TensorType cell_bias             = create_tensor<TensorType>(cell_bias_shape, data_type);
        TensorType output_gate_bias      = create_tensor<TensorType>(cell_bias_shape, data_type);
        TensorType output_state          = create_tensor<TensorType>(output_shape, data_type);
        TensorType cell_state            = create_tensor<TensorType>(output_cell_shape, data_type);
        TensorType scratch               = create_tensor<TensorType>(scratch_shape, data_type);
        TensorType output                = create_tensor<TensorType>(output_shape, data_type);
        TensorType input_to_input_w;
        TensorType recurrent_to_input_w;
        TensorType cell_to_input_w;
        TensorType cell_to_forget_w;
        TensorType input_gate_bias;
        TensorType cell_to_output_w;
        TensorType projection_w;
        TensorType projection_bias;

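        // Whether the coupled input-forget gate (CIFG) path is exercised is derived from the scratch buffer width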
        bool cifg_opt = scratch_shape.x() == cell_bias_shape.x() * 4;

        FunctionParams lstm_params;

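        // Without CIFG the input gate has its own weights and bias, passed to the function through lstm_params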
        if(!cifg_opt)
        {
            input_to_input_w     = create_tensor<TensorType>(input_weights_shape, data_type);
            recurrent_to_input_w = create_tensor<TensorType>(recurrent_weights_shape, data_type);
            cell_to_input_w      = create_tensor<TensorType>(cell_bias_shape, data_type);
            input_gate_bias      = create_tensor<TensorType>(cell_bias_shape, data_type);
            lstm_params.set_cifg_params(&input_to_input_w, &recurrent_to_input_w, &cell_to_input_w, &input_gate_bias);
        }

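        // Peephole connections: the cell state feeds the forget and output gates directly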
        if(peephole_opt)
        {
            cell_to_forget_w = create_tensor<TensorType>(cell_bias_shape, data_type);
            cell_to_output_w = create_tensor<TensorType>(cell_bias_shape, data_type);
            lstm_params.set_peephole_params(&cell_to_forget_w, &cell_to_output_w);
        }

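        // Optional projection of the output state through an extra fully connected layer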
        if(projection_opt)
        {
            projection_w    = create_tensor<TensorType>(recurrent_weights_shape, data_type);
            projection_bias = create_tensor<TensorType>(projection_bias_shape, data_type);
            lstm_params.set_projection_params(&projection_w, &projection_bias);
        }

        // Create and configure function
        FunctionType lstm;
        lstm.configure(&input, &input_to_forget_w, &input_to_cell_w, &input_to_output_w, &recurrent_to_forget_w,
                       &recurrent_to_cell_w, &recurrent_to_output_w, &forget_gate_bias, &cell_bias, &output_gate_bias, &output_state, &cell_state,
                       &scratch, &output, lstm_params, info, cell_threshold, projection_threshold);

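        // Validate that the mandatory tensors are still resizable, i.e. not yet allocated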
        ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(input_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(input_to_cell_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(input_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(recurrent_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(recurrent_to_cell_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(recurrent_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(forget_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(cell_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(output_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(output_state.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(cell_state.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(scratch.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        input.allocator()->allocate();
        input_to_forget_w.allocator()->allocate();
        input_to_cell_w.allocator()->allocate();
        input_to_output_w.allocator()->allocate();
        recurrent_to_forget_w.allocator()->allocate();
        recurrent_to_cell_w.allocator()->allocate();
        recurrent_to_output_w.allocator()->allocate();
        forget_gate_bias.allocator()->allocate();
        cell_bias.allocator()->allocate();
        output_gate_bias.allocator()->allocate();
        output_state.allocator()->allocate();
        cell_state.allocator()->allocate();
        scratch.allocator()->allocate();
        output.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!input_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!input_to_cell_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!input_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!recurrent_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!recurrent_to_cell_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!recurrent_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!forget_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!cell_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!output_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!output_state.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!cell_state.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!scratch.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);

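        // The fill seeds below must match the ones used in compute_reference so that both paths see the same data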
        // Fill tensors
        fill(AccessorType(input), 0);
        fill(AccessorType(input_to_forget_w), 1);
        fill(AccessorType(input_to_cell_w), 2);
        fill(AccessorType(input_to_output_w), 3);
        fill(AccessorType(recurrent_to_forget_w), 4);
        fill(AccessorType(recurrent_to_cell_w), 5);
        fill(AccessorType(recurrent_to_output_w), 6);
        fill(AccessorType(forget_gate_bias), 7);
        fill(AccessorType(cell_bias), 8);
        fill(AccessorType(output_gate_bias), 9);
        fill(AccessorType(output_state), 10);
        fill(AccessorType(cell_state), 11);
        fill(AccessorType(scratch), 12);

        if(!cifg_opt)
        {
            ARM_COMPUTE_EXPECT(input_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS);
            ARM_COMPUTE_EXPECT(recurrent_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS);
            ARM_COMPUTE_EXPECT(cell_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS);
            ARM_COMPUTE_EXPECT(input_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
            input_to_input_w.allocator()->allocate();
            recurrent_to_input_w.allocator()->allocate();
            cell_to_input_w.allocator()->allocate();
            input_gate_bias.allocator()->allocate();
            ARM_COMPUTE_EXPECT(!input_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS);
            ARM_COMPUTE_EXPECT(!recurrent_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS);
            ARM_COMPUTE_EXPECT(!cell_to_input_w.info()->is_resizable(), framework::LogLevel::ERRORS);
            ARM_COMPUTE_EXPECT(!input_gate_bias.info()->is_resizable(), framework::LogLevel::ERRORS);
            fill(AccessorType(input_to_input_w), 13);
            fill(AccessorType(recurrent_to_input_w), 14);
            fill(AccessorType(cell_to_input_w), 15);
            fill(AccessorType(recurrent_to_input_w), 16);
            fill(AccessorType(input_gate_bias), 17);
        }

        if(peephole_opt)
        {
            ARM_COMPUTE_EXPECT(cell_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS);
            ARM_COMPUTE_EXPECT(cell_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS);
            cell_to_forget_w.allocator()->allocate();
            cell_to_output_w.allocator()->allocate();
            ARM_COMPUTE_EXPECT(!cell_to_forget_w.info()->is_resizable(), framework::LogLevel::ERRORS);
            ARM_COMPUTE_EXPECT(!cell_to_output_w.info()->is_resizable(), framework::LogLevel::ERRORS);
            fill(AccessorType(cell_to_output_w), 18);
        }

        if(projection_opt)
        {
            ARM_COMPUTE_EXPECT(projection_w.info()->is_resizable(), framework::LogLevel::ERRORS);
            ARM_COMPUTE_EXPECT(projection_bias.info()->is_resizable(), framework::LogLevel::ERRORS);

            projection_w.allocator()->allocate();
            projection_bias.allocator()->allocate();

            ARM_COMPUTE_EXPECT(!projection_w.info()->is_resizable(), framework::LogLevel::ERRORS);
            ARM_COMPUTE_EXPECT(!projection_bias.info()->is_resizable(), framework::LogLevel::ERRORS);

            fill(AccessorType(projection_w), 19);
            fill(AccessorType(projection_bias), 20);
        }

        // Compute function
        lstm.run();

        return output;
    }

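    // Compute the reference LSTM output state using the validation reference kernels, with fills mirroring compute_target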
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &input_weights_shape, const TensorShape &recurrent_weights_shape, const TensorShape &cell_bias_shape,
                                      const TensorShape &output_cell_shape, const TensorShape &output_shape, const TensorShape &scratch_shape, ActivationLayerInfo info, float cell_threshold,
                                      float projection_threshold, DataType data_type, bool projection_opt, bool peephole_opt)
    {
        // Create projection bias shape
        TensorShape projection_bias_shape{};
        projection_bias_shape.set(0, output_shape.x());

        TensorShape     gemm_shape{ 1, output_shape.y() };
        SimpleTensor<T> gemm_out{ gemm_shape, data_type };

        // Create reference
        SimpleTensor<T> input{ input_shape, data_type };
        SimpleTensor<T> input_to_input_w{ input_weights_shape, data_type };
        SimpleTensor<T> input_to_forget_w{ input_weights_shape, data_type };
        SimpleTensor<T> input_to_cell_w{ input_weights_shape, data_type };
        SimpleTensor<T> input_to_output_w{ input_weights_shape, data_type };
        SimpleTensor<T> recurrent_to_input_w{ recurrent_weights_shape, data_type };
        SimpleTensor<T> recurrent_to_forget_w{ recurrent_weights_shape, data_type };
        SimpleTensor<T> recurrent_to_cell_w{ recurrent_weights_shape, data_type };
        SimpleTensor<T> recurrent_to_output_w{ recurrent_weights_shape, data_type };
        SimpleTensor<T> cell_to_input_w{ cell_bias_shape, data_type };
        SimpleTensor<T> cell_to_forget_w{ cell_bias_shape, data_type };
        SimpleTensor<T> cell_to_output_w{ cell_bias_shape, data_type };
        SimpleTensor<T> input_gate_bias{ cell_bias_shape, data_type };
        SimpleTensor<T> forget_gate_bias{ cell_bias_shape, data_type };
        SimpleTensor<T> cell_bias{ cell_bias_shape, data_type };
        SimpleTensor<T> output_gate_bias{ cell_bias_shape, data_type };
        SimpleTensor<T> projection_w{ recurrent_weights_shape, data_type };
        SimpleTensor<T> projection_bias{ projection_bias_shape, data_type };
        SimpleTensor<T> output_state{ output_shape, data_type };
        SimpleTensor<T> cell_state{ output_cell_shape, data_type };
        SimpleTensor<T> scratch{ scratch_shape, data_type };
        SimpleTensor<T> output{ output_shape, data_type };

        // Fill reference
        fill(input, 0);
        fill(input_to_forget_w, 1);
        fill(input_to_cell_w, 2);
        fill(input_to_output_w, 3);
        fill(recurrent_to_forget_w, 4);
        fill(recurrent_to_cell_w, 5);
        fill(recurrent_to_output_w, 6);
        fill(forget_gate_bias, 7);
        fill(cell_bias, 8);
        fill(output_gate_bias, 9);
        fill(output_state, 10);
        fill(cell_state, 11);
        fill(scratch, 12);
        fill(input_to_input_w, 13);
        fill(recurrent_to_input_w, 14);
        fill(cell_to_input_w, 15);
        fill(recurrent_to_input_w, 16);
        fill(input_gate_bias, 17);
        fill(cell_to_output_w, 18);
        fill(projection_w, 19);
        fill(projection_bias, 20);

        bool cifg_opt = scratch_shape.x() == cell_bias_shape.x() * 4;

        // Compute forget_gate
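        // f_t = sigmoid(W_{xf} x_t + W_{hf} h_{t-1} + b_f), plus the peephole term w_{cf} .* c_{t-1} when enabled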
        SimpleTensor<T> fully_connected_forget = reference::fully_connected_layer(input, input_to_forget_w, forget_gate_bias, output_cell_shape);
        SimpleTensor<T> transposed_weights     = reference::transpose(recurrent_to_forget_w);
        SimpleTensor<T> gemm                   = reference::gemm(output_state, transposed_weights, cell_state, 1.f, 0.f);
        SimpleTensor<T> forget_gate            = reference::arithmetic_addition(fully_connected_forget, gemm, data_type, ConvertPolicy::SATURATE);

        if(peephole_opt)
        {
            SimpleTensor<T> pixelwise_mul_forget_gate = reference::pixel_wise_multiplication(cell_state, cell_to_forget_w, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
            forget_gate = reference::arithmetic_addition(forget_gate, pixelwise_mul_forget_gate, data_type, ConvertPolicy::SATURATE);
        }

        forget_gate = reference::activation_layer(forget_gate, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));

        // Compute input_gate
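        // With CIFG: i_t = 1 - f_t. Otherwise: i_t = sigmoid(W_{xi} x_t + W_{hi} h_{t-1} + b_i + w_{ci} .* c_{t-1})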
        SimpleTensor<T> input_gate;
        if(cifg_opt)
        {
            SimpleTensor<T> ones{ cell_bias_shape, data_type };
            fill_custom_val(ones, 1.f, 0);
            input_gate = reference::arithmetic_subtraction<T, T, T>(ones, forget_gate, data_type, ConvertPolicy::SATURATE);
        }
        else
        {
            SimpleTensor<T> fully_connected_input = reference::fully_connected_layer(input, input_to_input_w, input_gate_bias, output_cell_shape);
            transposed_weights = reference::transpose(recurrent_to_input_w);
            gemm               = reference::gemm(output_state, transposed_weights, cell_state, 1.f, 0.f);
            input_gate         = reference::arithmetic_addition(fully_connected_input, gemm, data_type, ConvertPolicy::SATURATE);
            SimpleTensor<T> pixelwise_mul_input_gate = reference::pixel_wise_multiplication(cell_state, cell_to_input_w, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
            input_gate = reference::arithmetic_addition(input_gate, pixelwise_mul_input_gate, data_type, ConvertPolicy::SATURATE);
            input_gate = reference::activation_layer(input_gate, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
        }

        // Compute cell_state
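        // c_t = i_t .* g(W_{xc} x_t + W_{hc} h_{t-1} + b_c) + f_t .* c_{t-1}, with g = logistic in this reference,
        // then optionally clipped to [-cell_threshold, cell_threshold]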
        SimpleTensor<T> fully_connected_cell_state = reference::fully_connected_layer(input, input_to_cell_w, cell_bias, output_cell_shape);
        transposed_weights            = reference::transpose(recurrent_to_cell_w);
        gemm                          = reference::gemm(output_state, transposed_weights, cell_state, 1.f, 0.f);
        SimpleTensor<T> pixelwise_mul = reference::pixel_wise_multiplication(cell_state, forget_gate, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
        cell_state                    = reference::arithmetic_addition(fully_connected_cell_state, gemm, data_type, ConvertPolicy::SATURATE);
        cell_state                    = reference::activation_layer(cell_state, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
        cell_state                    = reference::pixel_wise_multiplication(cell_state, input_gate, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
        cell_state                    = reference::arithmetic_addition(cell_state, pixelwise_mul, data_type, ConvertPolicy::SATURATE);
        if(cell_threshold != 0.f)
        {
            cell_state = reference::activation_layer(cell_state, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -cell_threshold, cell_threshold));
        }

        // Compute output
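        // o_t = sigmoid(W_{xo} x_t + W_{ho} h_{t-1} + b_o), plus the peephole term w_{co} .* c_t when enabled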
        SimpleTensor<T> fully_connected_output = reference::fully_connected_layer(input, input_to_output_w, output_gate_bias, output_cell_shape);
        transposed_weights = reference::transpose(recurrent_to_output_w);
        gemm               = reference::gemm(output_state, transposed_weights, cell_state, 1.f, 0.f);
        output             = reference::arithmetic_addition(fully_connected_output, gemm, data_type, ConvertPolicy::SATURATE);
        if(peephole_opt)
        {
            pixelwise_mul = reference::pixel_wise_multiplication(cell_state, cell_to_output_w, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
            output        = reference::arithmetic_addition(output, pixelwise_mul, data_type, ConvertPolicy::SATURATE);
        }
        output = reference::activation_layer(output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));

        // Compute output state
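        // h_t = o_t .* act(c_t), where act is the configured activation, optionally followed by a projection
        // clipped to [-projection_threshold, projection_threshold]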
        SimpleTensor<T> cell_state_activation = reference::activation_layer(cell_state, info);
        output_state = reference::pixel_wise_multiplication(output, cell_state_activation, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);

        if(projection_opt)
        {
            SimpleTensor<T> fully_connected_projection = reference::fully_connected_layer(output_state, projection_w, projection_bias, output_cell_shape);
            if(projection_threshold != 0.f)
            {
                output_state = reference::activation_layer(fully_connected_projection, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -projection_threshold, projection_threshold));
            }
        }
        return output_state;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_LSTM_LAYER_FIXTURE */