/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h"

#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"

#include <cmath>
#include <memory>
#include <tuple>

namespace arm_compute
{
namespace
{
// Quantization info structures used in the quantized LSTM layer
const QuantizationInfo qasymm(1.f / 128.f, 128);
const QuantizationInfo qsymm_3(8.f / 32768.f, 0);  // qsymm16 with 3 integer bits
const QuantizationInfo qsymm_4(16.f / 32768.f, 0); // qsymm16 with 4 integer bits
const QuantizationInfo qsymm_0(1.f / 32768.f, 0);  // qsymm16 with 0 integer bits
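// Note: a QSYMM16 format with n integer bits uses scale 2^(n - 15), so it covers
// the range [-2^n, 2^n); e.g. qsymm_3 covers [-8, 8) in steps of 2^-12.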
} // namespace

NELSTMLayerQuantized::NELSTMLayerQuantized(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _gemmlowp(), _output_stage(), _transpose_weights(), _concat_input_weights(), _concat_recurrent_weights(), _concat_weights(), _concat_inputs(),
      _concat_bias(), _sigmoid_forget_gate(), _sigmoid_input_gate(), _sigmoid_output_gate(), _tanh_modulation_gate(), _tanh_output_state(), _add1(), _add2(), _mul1(), _mul2(), _mul3(),
      _slice_input_tensor(), _slice_forget_tensor(), _slice_cell_tensor(), _slice_output_tensor(), _dequantize(), _quantize(), _input_to_input_weights(nullptr), _input_to_forget_weights(nullptr),
      _input_to_cell_weights(nullptr), _input_to_output_weights(nullptr), _recurrent_to_input_weights(nullptr), _recurrent_to_forget_weights(nullptr), _recurrent_to_cell_weights(nullptr),
      _recurrent_to_output_weights(nullptr), _input_gate_bias(nullptr), _forget_gate_bias(nullptr), _cell_bias(nullptr), _output_gate_bias(nullptr), _recurrent_weights(), _input_weights(), _weights(),
      _input(), _weights_transposed(), _output_highp(), _output_lowp(), _bias(), _forget_gate_input(), _input_gate_input(), _output_gate_input(), _input_modulation_gate_input(), _forget_gate_output(),
      _input_gate_output(), _output_gate_output(), _input_modulation_gate_output(), _cell_state1(), _cell_state2(), _output_state_tmp(), _output_state_out_symm(), _output_state_out_f32(),
      _is_prepared(false)
{
}

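// Illustrative usage sketch (a minimal example; the tensor names are hypothetical,
// not part of the library):
//   NELSTMLayerQuantized lstm(mem_mgr);
//   lstm.configure(&input,                     // QASYMM8, [input_size, batch_size]
//                  &w_xi, &w_xf, &w_xc, &w_xo, // QASYMM8 input weights, one per gate
//                  &w_hi, &w_hf, &w_hc, &w_ho, // QASYMM8 recurrent weights, one per gate
//                  &b_i, &b_f, &b_c, &b_o,     // S32 biases, one per gate
//                  &cell_in, &out_state_in,    // QSYMM16 / QASYMM8 previous states
//                  &cell_out, &out_state_out); // QSYMM16 / QASYMM8 next states
//   lstm.run(); // once per time step; prepare() runs implicitly on the first call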
void NELSTMLayerQuantized::configure(const ITensor *input,
                                     const ITensor *input_to_input_weights, const ITensor *input_to_forget_weights, const ITensor *input_to_cell_weights, const ITensor *input_to_output_weights,
                                     const ITensor *recurrent_to_input_weights, const ITensor *recurrent_to_forget_weights, const ITensor *recurrent_to_cell_weights, const ITensor *recurrent_to_output_weights,
                                     const ITensor *input_gate_bias, const ITensor *forget_gate_bias, const ITensor *cell_bias, const ITensor *output_gate_bias,
                                     ITensor *cell_state_in, const ITensor *output_state_in,
                                     ITensor *cell_state_out, ITensor *output_state_out)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights,
                                 recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights,
                                 input_gate_bias, forget_gate_bias, cell_bias, output_gate_bias, cell_state_in, output_state_in, cell_state_out, output_state_out);

    ARM_COMPUTE_ERROR_THROW_ON(NELSTMLayerQuantized::validate(input->info(), input_to_input_weights->info(), input_to_forget_weights->info(), input_to_cell_weights->info(),
                                                              input_to_output_weights->info(),
                                                              recurrent_to_input_weights->info(), recurrent_to_forget_weights->info(), recurrent_to_cell_weights->info(), recurrent_to_output_weights->info(),
                                                              input_gate_bias->info(), forget_gate_bias->info(), cell_bias->info(), output_gate_bias->info(), cell_state_in->info(), output_state_in->info(), cell_state_out->info(), output_state_out->info()));

    const int input_size  = input->info()->dimension(0);
    const int batch_size  = input->info()->dimension(1);
    const int output_size = input_to_input_weights->info()->dimension(1);
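    // Note: dimension(0) is the innermost (x) dimension in Compute Library, so 2D shapes read as (width, height).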

    const QuantizationInfo qweights = input_to_input_weights->info()->quantization_info(); // Weights quantization

    auto_init_if_empty(*cell_state_out->info(), TensorInfo(TensorShape(batch_size, output_size), 1, DataType::QSYMM16, qsymm_4));
    auto_init_if_empty(*output_state_out->info(), TensorInfo(TensorShape(batch_size, output_size), 1, DataType::QASYMM8, qasymm));

    _input_to_input_weights      = input_to_input_weights;
    _input_to_forget_weights     = input_to_forget_weights;
    _input_to_cell_weights       = input_to_cell_weights;
    _input_to_output_weights     = input_to_output_weights;
    _recurrent_to_input_weights  = recurrent_to_input_weights;
    _recurrent_to_forget_weights = recurrent_to_forget_weights;
    _recurrent_to_cell_weights   = recurrent_to_cell_weights;
    _recurrent_to_output_weights = recurrent_to_output_weights;
    _input_gate_bias             = input_gate_bias;
    _forget_gate_bias            = forget_gate_bias;
    _cell_bias                   = cell_bias;
    _output_gate_bias            = output_gate_bias;

    // Weights concatenation
    std::vector<const ITensor *> inputs_weights_vector{ input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights };
    std::vector<const ITensor *> recurrent_weights_vector{ recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights };

    _input_weights.allocator()->init(TensorInfo(TensorShape(input_size, 4 * output_size), 1, DataType::QASYMM8, qweights));
    _concat_input_weights.configure(inputs_weights_vector, &_input_weights, Window::DimY);

    _recurrent_weights.allocator()->init(TensorInfo(TensorShape(output_size, 4 * output_size), 1, DataType::QASYMM8, qweights));
    _concat_recurrent_weights.configure(recurrent_weights_vector, &_recurrent_weights, Window::DimY);

    std::vector<const ITensor *> weights_vector{ &_recurrent_weights, &_input_weights };
    _weights.allocator()->init(TensorInfo(TensorShape(output_size + input_size, 4 * output_size), 1, DataType::QASYMM8, qweights));
    _concat_weights.configure(weights_vector, &_weights, Window::DimX);
    _transpose_weights.configure(&_weights, &_weights_transposed);

    // Input concatenation
    std::vector<const ITensor *> input_vector{ input, output_state_in };
    _memory_group.manage(&_input);
    _input.allocator()->init(TensorInfo(TensorShape(output_size + input_size, batch_size), 1, DataType::QASYMM8, qasymm));
    _concat_inputs.configure(input_vector, &_input, Window::DimX);

    // Bias concatenation
    std::vector<const ITensor *> bias_vector{ input_gate_bias, forget_gate_bias, cell_bias, output_gate_bias };
    _bias.allocator()->init(TensorInfo(TensorShape(4 * output_size), 1, DataType::S32));
    _concat_bias.configure(bias_vector, &_bias, Window::DimX);

    // Invert the offset for gemmlowp
    _input.info()->set_quantization_info(QuantizationInfo(qasymm.uniform().scale, -qasymm.uniform().offset));
    _weights_transposed.info()->set_quantization_info(QuantizationInfo(qweights.uniform().scale, -qweights.uniform().offset));
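    // (Presumably the GEMMLowp core applies the zero-point offsets with the opposite sign
    // convention to QuantizationInfo; the original offsets are restored after configuration.)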

    // Run gemmlowp
    _memory_group.manage(&_output_highp);
    _output_highp.allocator()->init(TensorInfo(TensorShape(4 * output_size, batch_size), 1, DataType::S32));
    _gemmlowp.configure(&_input, &_weights_transposed, nullptr, &_output_highp);
    _input.allocator()->allocate();

    // Set the offset back
    _input.info()->set_quantization_info(QuantizationInfo(qasymm.uniform().scale, qasymm.uniform().offset));
    _weights_transposed.info()->set_quantization_info(QuantizationInfo(qweights.uniform().scale, qweights.uniform().offset));

    // multiplier = (input_scale * weights_scale) / output_scale, where output_scale = 2^-12 (qsymm_3)
    _output_lowp.allocator()->init(TensorInfo(_output_highp.info()->tensor_shape(), 1, DataType::QSYMM16, qsymm_3));

    const float multiplier        = 4096.f * qasymm.uniform().scale * qweights.uniform().scale;
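    // e.g. with the fixed input scale of 1/128 and a (hypothetical) weights scale of 1/64,
    // multiplier = 4096 * (1/128) * (1/64) = 0.5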
    int         output_multiplier = 0;
    int         output_shift      = 0;
    quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);

    _memory_group.manage(&_output_lowp);
    _output_stage.configure(&_output_highp, &_bias, &_output_lowp, output_multiplier, output_shift);
    _output_highp.allocator()->allocate();
    _bias.allocator()->allocate();

    // Get the gate tensors
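    // The GEMM output packs the four gates along the X axis as [input | forget | cell | output],
    // each output_size elements wide, so the slices are 2D for batched input and 1D otherwise.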
    if(batch_size > 1)
    {
        _memory_group.manage(&_input_gate_input);
        _slice_input_tensor.configure(&_output_lowp, &_input_gate_input, { 0, 0 }, { output_size, batch_size });
        _memory_group.manage(&_forget_gate_input);
        _slice_forget_tensor.configure(&_output_lowp, &_forget_gate_input, { output_size, 0 }, { 2 * output_size, batch_size });
        _memory_group.manage(&_input_modulation_gate_input);
        _slice_cell_tensor.configure(&_output_lowp, &_input_modulation_gate_input, { 2 * output_size, 0 }, { 3 * output_size, batch_size });
        _memory_group.manage(&_output_gate_input);
        _slice_output_tensor.configure(&_output_lowp, &_output_gate_input, { 3 * output_size, 0 }, { 4 * output_size, batch_size });
        _output_lowp.allocator()->allocate();
    }
    else
    {
        _memory_group.manage(&_input_gate_input);
        _slice_input_tensor.configure(&_output_lowp, &_input_gate_input, { 0 }, { output_size });
        _memory_group.manage(&_forget_gate_input);
        _slice_forget_tensor.configure(&_output_lowp, &_forget_gate_input, { output_size }, { 2 * output_size });
        _memory_group.manage(&_input_modulation_gate_input);
        _slice_cell_tensor.configure(&_output_lowp, &_input_modulation_gate_input, { 2 * output_size }, { 3 * output_size });
        _memory_group.manage(&_output_gate_input);
        _slice_output_tensor.configure(&_output_lowp, &_output_gate_input, { 3 * output_size }, { 4 * output_size });
        _output_lowp.allocator()->allocate();
    }

    // Forget gate
    _memory_group.manage(&_forget_gate_output);
    _forget_gate_output.allocator()->init(TensorInfo(_forget_gate_input.info()->tensor_shape(), 1, DataType::QSYMM16, qsymm_0));
    _sigmoid_forget_gate.configure(&_forget_gate_input, &_forget_gate_output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
    _forget_gate_input.allocator()->allocate();

    // Input gate
    _memory_group.manage(&_input_gate_output);
    _input_gate_output.allocator()->init(TensorInfo(_input_gate_input.info()->tensor_shape(), 1, DataType::QSYMM16, qsymm_0));
    _sigmoid_input_gate.configure(&_input_gate_input, &_input_gate_output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
    _input_gate_input.allocator()->allocate();

    // Input modulation gate
    _memory_group.manage(&_input_modulation_gate_output);
    _input_modulation_gate_output.allocator()->init(TensorInfo(_input_modulation_gate_input.info()->tensor_shape(), 1, DataType::QSYMM16, qsymm_0));
    _tanh_modulation_gate.configure(&_input_modulation_gate_input, &_input_modulation_gate_output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f));
    _input_modulation_gate_input.allocator()->allocate();

    // Output gate
    _memory_group.manage(&_output_gate_output);
    _output_gate_output.allocator()->init(TensorInfo(_output_gate_input.info()->tensor_shape(), 1, DataType::QSYMM16, qsymm_0));
    _sigmoid_output_gate.configure(&_output_gate_input, &_output_gate_output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
    _output_gate_input.allocator()->allocate();

    // Long term memory
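    // cell_state_out = forget_gate_output * cell_state_in + input_gate_output * input_modulation_gate_output
    // (elementwise, saturating, accumulated in the Q4.11 qsymm_4 format)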
    _memory_group.manage(&_cell_state1);
    _cell_state1.allocator()->init(TensorInfo(_forget_gate_output.info()->tensor_shape(), 1, DataType::QSYMM16, qsymm_4));
    _mul1.configure(&_forget_gate_output, cell_state_in, &_cell_state1, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
    _forget_gate_output.allocator()->allocate();

    _memory_group.manage(&_cell_state2);
    _cell_state2.allocator()->init(TensorInfo(_input_gate_output.info()->tensor_shape(), 1, DataType::QSYMM16, qsymm_4));
    _mul2.configure(&_input_gate_output, &_input_modulation_gate_output, &_cell_state2, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
    _input_modulation_gate_output.allocator()->allocate();
    _input_gate_output.allocator()->allocate();

    _add1.configure(&_cell_state1, &_cell_state2, cell_state_out, ConvertPolicy::SATURATE);
    _cell_state1.allocator()->allocate();
    _cell_state2.allocator()->allocate();

    // Short term memory
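    // output_state_out = output_gate_output * tanh(cell_state_out), computed in the Q0.15 qsymm_0 format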
    _memory_group.manage(&_output_state_tmp);
    _output_state_tmp.allocator()->init(TensorInfo(cell_state_out->info()->tensor_shape(), 1, DataType::QSYMM16, qsymm_0));
    _tanh_output_state.configure(cell_state_out, &_output_state_tmp, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f));

    _memory_group.manage(&_output_state_out_symm);
    _output_state_out_symm.allocator()->init(TensorInfo(_output_gate_output.info()->tensor_shape(), 1, DataType::QSYMM16, qsymm_0));
    _mul3.configure(&_output_state_tmp, &_output_gate_output, &_output_state_out_symm, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
    _output_gate_output.allocator()->allocate();
    _output_state_tmp.allocator()->allocate();

    // Requantize the output state from QSYMM16 to QASYMM8 via an F32 round trip (dequantize, then quantize)
    _memory_group.manage(&_output_state_out_f32);
    _output_state_out_f32.allocator()->init(TensorInfo(_output_state_out_symm.info()->tensor_shape(), 1, DataType::F32));
    _dequantize.configure(&_output_state_out_symm, &_output_state_out_f32);
    _output_state_out_symm.allocator()->allocate();

    _quantize.configure(&_output_state_out_f32, output_state_out);
    _output_state_out_f32.allocator()->allocate();
}

Status NELSTMLayerQuantized::validate(const ITensorInfo *input,
                                      const ITensorInfo *input_to_input_weights, const ITensorInfo *input_to_forget_weights, const ITensorInfo *input_to_cell_weights, const ITensorInfo *input_to_output_weights,
                                      const ITensorInfo *recurrent_to_input_weights, const ITensorInfo *recurrent_to_forget_weights, const ITensorInfo *recurrent_to_cell_weights, const ITensorInfo *recurrent_to_output_weights,
                                      const ITensorInfo *input_gate_bias, const ITensorInfo *forget_gate_bias, const ITensorInfo *cell_bias, const ITensorInfo *output_gate_bias,
                                      const ITensorInfo *cell_state_in, const ITensorInfo *output_state_in,
                                      const ITensorInfo *cell_state_out, const ITensorInfo *output_state_out)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights,
                                        recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, input_gate_bias, forget_gate_bias, cell_bias, output_gate_bias, cell_state_in,
                                        output_state_in, cell_state_out, output_state_out);

    const int input_size  = input->dimension(0);
    const int batch_size  = input->dimension(1);
    const int output_size = input_to_input_weights->dimension(1);

    // Dimensionality checks
    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(input_to_input_weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(input_gate_bias->num_dimensions() > 1);
    ARM_COMPUTE_RETURN_ERROR_ON(output_state_in->num_dimensions() > 2);

    TensorInfo input_weights_info(input_to_input_weights->clone()->set_tensor_shape(TensorShape(input_size, output_size)).set_data_type(DataType::QASYMM8));
    TensorInfo recurrent_weights_info(input_to_input_weights->clone()->set_tensor_shape(TensorShape(output_size, output_size)).set_data_type(DataType::QASYMM8));
    TensorInfo bias_info(input_gate_bias->clone()->set_tensor_shape(TensorShape(output_size)).set_data_type(DataType::S32));
    TensorInfo output_state_info(cell_state_in->clone()->set_tensor_shape(TensorShape(output_size, batch_size)).set_data_type(DataType::QASYMM8).set_quantization_info(qasymm));
    TensorInfo cell_state_info(cell_state_in->clone()->set_tensor_shape(TensorShape(output_size, batch_size)).set_data_type(DataType::QSYMM16).set_quantization_info(qsymm_4));

    // Shape checks
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&input_weights_info, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&recurrent_weights_info, recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&bias_info, input_gate_bias, forget_gate_bias, cell_bias, output_gate_bias);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&cell_state_info, cell_state_in);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&output_state_info, output_state_in);

    // Data type checks
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input_weights_info, input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&bias_info, input_gate_bias, forget_gate_bias, cell_bias, output_gate_bias);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&cell_state_info, cell_state_in);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&output_state_info, output_state_in);

    // Quantization checks
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(&input_weights_info, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(&cell_state_info, cell_state_in);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(&output_state_info, output_state_in);

    // Validate internal functions
    // _concat_input_weights
    std::vector<const ITensorInfo *> inputs_weights_vector;
    inputs_weights_vector.emplace_back(input_to_input_weights);
    inputs_weights_vector.emplace_back(input_to_forget_weights);
    inputs_weights_vector.emplace_back(input_to_cell_weights);
    inputs_weights_vector.emplace_back(input_to_output_weights);
    const QuantizationInfo qweights = input_to_input_weights->quantization_info(); // Weights quantization
    const TensorInfo       input_weights(TensorShape(input_size, 4 * output_size), 1, DataType::QASYMM8, qweights);
    ARM_COMPUTE_RETURN_ON_ERROR(NEConcatenateLayer::validate(inputs_weights_vector, &input_weights, Window::DimY));

    // _concat_recurrent_weights
    std::vector<const ITensorInfo *> recurrent_weights_vector;
    recurrent_weights_vector.emplace_back(recurrent_to_input_weights);
    recurrent_weights_vector.emplace_back(recurrent_to_forget_weights);
    recurrent_weights_vector.emplace_back(recurrent_to_cell_weights);
    recurrent_weights_vector.emplace_back(recurrent_to_output_weights);
    const TensorInfo recurrent_weights(TensorShape(output_size, 4 * output_size), 1, DataType::QASYMM8, qweights);
    ARM_COMPUTE_RETURN_ON_ERROR(NEConcatenateLayer::validate(recurrent_weights_vector, &recurrent_weights, Window::DimY));

    // _concat_weights
    std::vector<const ITensorInfo *> weights_vector;
    weights_vector.emplace_back(&recurrent_weights);
    weights_vector.emplace_back(&input_weights);
    const TensorInfo weights(TensorShape(input_size + output_size, 4 * output_size), 1, DataType::QASYMM8, qweights);
    ARM_COMPUTE_RETURN_ON_ERROR(NEConcatenateLayer::validate(weights_vector, &weights, Window::DimX));
    // _transpose_weights
    const TensorShape weights_transposed_shape(weights.tensor_shape()[1], weights.tensor_shape()[0]);
    TensorInfo        weights_transposed = weights.clone()->set_is_resizable(true).set_tensor_shape(weights_transposed_shape);
    ARM_COMPUTE_RETURN_ON_ERROR(NETranspose::validate(&weights, &weights_transposed));

    // _concat_inputs
    std::vector<const ITensorInfo *> input_vector;
    input_vector.emplace_back(input);
    input_vector.emplace_back(output_state_in);
    TensorInfo input_concatenated(TensorShape(output_size + input_size, batch_size), 1, DataType::QASYMM8, qasymm);
    ARM_COMPUTE_RETURN_ON_ERROR(NEConcatenateLayer::validate(input_vector, &input_concatenated, Window::DimX));

    // _concat_bias
    std::vector<const ITensorInfo *> bias_vector;
    bias_vector.emplace_back(input_gate_bias);
    bias_vector.emplace_back(forget_gate_bias);
    bias_vector.emplace_back(cell_bias);
    bias_vector.emplace_back(output_gate_bias);

    const TensorInfo bias_concatenated(TensorShape(4 * output_size), 1, DataType::S32);
    ARM_COMPUTE_RETURN_ON_ERROR(NEConcatenateLayer::validate(bias_vector, &bias_concatenated, Window::DimX));

    // Invert the offset for gemmlowp
    input_concatenated.set_quantization_info(QuantizationInfo(qasymm.uniform().scale, -qasymm.uniform().offset));
    weights_transposed.set_quantization_info(QuantizationInfo(qweights.uniform().scale, -qweights.uniform().offset));

    // _gemmlowp
    const TensorInfo output_highp(TensorShape(4 * output_size, batch_size), 1, DataType::S32);
    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyCore::validate(&input_concatenated, &weights_transposed, nullptr, &output_highp));

    // Set the offset back
    input_concatenated.set_quantization_info(QuantizationInfo(qasymm.uniform().scale, qasymm.uniform().offset));
    weights_transposed.set_quantization_info(QuantizationInfo(qweights.uniform().scale, qweights.uniform().offset));

    const TensorInfo output_lowp(output_highp.tensor_shape(), 1, DataType::QSYMM16, qsymm_3);

    const float multiplier        = 4096.f * qasymm.uniform().scale * qweights.uniform().scale;
    int         output_multiplier = 0;
    int         output_shift      = 0;
    ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));

    // _output_stage
    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(&output_highp, &bias_concatenated, &output_lowp));

    TensorInfo input_gate_input;
    TensorInfo forget_gate_input;
    TensorInfo input_modulation_gate_input;
    TensorInfo output_gate_input;

    if(batch_size > 1)
    {
        // _slice_input_tensor
        input_gate_input = TensorInfo(TensorShape(output_size, batch_size), 1, DataType::QSYMM16, qsymm_3);
        ARM_COMPUTE_RETURN_ON_ERROR(NESlice::validate(&output_lowp, &input_gate_input, { 0, 0 }, { output_size, batch_size }));
        // _slice_forget_tensor
        forget_gate_input = TensorInfo(TensorShape(output_size, batch_size), 1, DataType::QSYMM16, qsymm_3);
        ARM_COMPUTE_RETURN_ON_ERROR(NESlice::validate(&output_lowp, &forget_gate_input, { output_size, 0 }, { 2 * output_size, batch_size }));
        // _slice_cell_tensor
        input_modulation_gate_input = TensorInfo(TensorShape(output_size, batch_size), 1, DataType::QSYMM16, qsymm_3);
        ARM_COMPUTE_RETURN_ON_ERROR(NESlice::validate(&output_lowp, &input_modulation_gate_input, { 2 * output_size, 0 }, { 3 * output_size, batch_size }));
        // _slice_output_tensor
        output_gate_input = TensorInfo(TensorShape(output_size, batch_size), 1, DataType::QSYMM16, qsymm_3);
        ARM_COMPUTE_RETURN_ON_ERROR(NESlice::validate(&output_lowp, &output_gate_input, { 3 * output_size, 0 }, { 4 * output_size, batch_size }));
    }
    else
    {
        // _slice_input_tensor
        input_gate_input = TensorInfo(TensorShape(output_size), 1, DataType::QSYMM16, qsymm_3);
        ARM_COMPUTE_RETURN_ON_ERROR(NESlice::validate(&output_lowp, &input_gate_input, { 0 }, { output_size }));
        // _slice_forget_tensor
        forget_gate_input = TensorInfo(TensorShape(output_size), 1, DataType::QSYMM16, qsymm_3);
        ARM_COMPUTE_RETURN_ON_ERROR(NESlice::validate(&output_lowp, &forget_gate_input, { output_size }, { 2 * output_size }));
        // _slice_cell_tensor
        input_modulation_gate_input = TensorInfo(TensorShape(output_size), 1, DataType::QSYMM16, qsymm_3);
        ARM_COMPUTE_RETURN_ON_ERROR(NESlice::validate(&output_lowp, &input_modulation_gate_input, { 2 * output_size }, { 3 * output_size }));
        // _slice_output_tensor
        output_gate_input = TensorInfo(TensorShape(output_size), 1, DataType::QSYMM16, qsymm_3);
        ARM_COMPUTE_RETURN_ON_ERROR(NESlice::validate(&output_lowp, &output_gate_input, { 3 * output_size }, { 4 * output_size }));
    }

    // _sigmoid_forget_gate
    const TensorInfo forget_gate_output(forget_gate_input.tensor_shape(), 1, DataType::QSYMM16, qsymm_0);
    ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(&forget_gate_input, &forget_gate_output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)));
    // _sigmoid_input_gate
    const TensorInfo input_gate_output(input_gate_input.tensor_shape(), 1, DataType::QSYMM16, qsymm_0);
    ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(&input_gate_input, &input_gate_output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)));
    // _tanh_modulation_gate
    const TensorInfo input_modulation_gate_output(input_modulation_gate_input.tensor_shape(), 1, DataType::QSYMM16, qsymm_0);
    ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(&input_modulation_gate_input, &input_modulation_gate_output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f)));
    // _sigmoid_output_gate
    const TensorInfo output_gate_output(output_gate_input.tensor_shape(), 1, DataType::QSYMM16, qsymm_0);
    ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(&output_gate_input, &output_gate_output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)));

    // _mul1: forget gate * cell state
    const TensorInfo cell_state_tmp1(forget_gate_output.tensor_shape(), 1, DataType::QSYMM16, qsymm_4);
    ARM_COMPUTE_RETURN_ON_ERROR(NEPixelWiseMultiplication::validate(&forget_gate_output, cell_state_in, &cell_state_tmp1, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO));

    // _mul2: input gate * input modulation gate
    const TensorInfo cell_state_tmp2(input_gate_output.tensor_shape(), 1, DataType::QSYMM16, qsymm_4);
    ARM_COMPUTE_RETURN_ON_ERROR(NEPixelWiseMultiplication::validate(&input_gate_output, &input_modulation_gate_output, &cell_state_tmp2, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO));

    // _add1: sum of the two cell-state contributions
    ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAddition::validate(&cell_state_tmp1, &cell_state_tmp2, cell_state_out, ConvertPolicy::SATURATE));

    // _tanh_output_state
    const TensorInfo output_state_tmp(cell_state_out->tensor_shape(), 1, DataType::QSYMM16, qsymm_0);
    ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(cell_state_out, &output_state_tmp, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f)));

    // _mul3: output state tmp * output gate
    const TensorInfo output_state_out_symm(output_gate_output.tensor_shape(), 1, DataType::QSYMM16, qsymm_0);
    ARM_COMPUTE_RETURN_ON_ERROR(NEPixelWiseMultiplication::validate(&output_state_tmp, &output_gate_output, &output_state_out_symm, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO));

    // _dequantize
    const TensorInfo output_state_out_f32(output_state_out_symm.tensor_shape(), 1, DataType::F32);
    ARM_COMPUTE_RETURN_ON_ERROR(NEDequantizationLayer::validate(&output_state_out_symm, &output_state_out_f32));

    // _quantize
    ARM_COMPUTE_RETURN_ON_ERROR(NEQuantizationLayer::validate(&output_state_out_f32, output_state_out));

    if(cell_state_out->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&cell_state_info, cell_state_out);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&cell_state_info, cell_state_out);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(&cell_state_info, cell_state_out);
    }

    if(output_state_out->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&output_state_info, output_state_out);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&output_state_info, output_state_out);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(&output_state_info, output_state_out);
    }

    return Status{};
}

void NELSTMLayerQuantized::run()
{
    prepare();

    // Acquire all the temporaries
    MemoryGroupResourceScope scope_mg(_memory_group);
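    // (the scope object acquires the memory-group-managed buffers here and releases them when run() returns)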

    // Concatenate the input with the previous output state
    _concat_inputs.run();

    // Run gemmlowp
    _gemmlowp.run();
    _output_stage.run();

    // Slice the results
    _slice_input_tensor.run();
    _slice_forget_tensor.run();
    _slice_cell_tensor.run();
    _slice_output_tensor.run();

    // Gates
    // Forget gate
    _sigmoid_forget_gate.run();

    // Input gate
    _sigmoid_input_gate.run();

    // Input modulation gate
    _tanh_modulation_gate.run();

    // Output gate
    _sigmoid_output_gate.run();

    // Cell state (long term memory)
    _mul1.run();
    _mul2.run();
    _add1.run();

    // Output state (short term memory)
    _tanh_output_state.run();
    _mul3.run();

    // Requantize output state from QSYMM16 to QASYMM8
    _dequantize.run();
    _quantize.run();
}

void NELSTMLayerQuantized::prepare()
{
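    // One-time setup: concatenate the weights and biases, transpose the combined weights,
    // then release the source tensors and intermediate buffers.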
    if(!_is_prepared)
    {
        _input_weights.allocator()->allocate();
        _concat_input_weights.run();

        _input_to_input_weights->mark_as_unused();
        _input_to_forget_weights->mark_as_unused();
        _input_to_cell_weights->mark_as_unused();
        _input_to_output_weights->mark_as_unused();

        _recurrent_weights.allocator()->allocate();
        _concat_recurrent_weights.run();
        _recurrent_to_input_weights->mark_as_unused();
        _recurrent_to_forget_weights->mark_as_unused();
        _recurrent_to_cell_weights->mark_as_unused();
        _recurrent_to_output_weights->mark_as_unused();

        _weights.allocator()->allocate();
        _concat_weights.run();

        _input_weights.mark_as_unused();
        _input_weights.allocator()->free();
        _recurrent_weights.mark_as_unused();
        _recurrent_weights.allocator()->free();

        _weights_transposed.allocator()->allocate();
        _transpose_weights.run();

        _weights.mark_as_unused();
        _weights.allocator()->free();

        _bias.allocator()->allocate();
        _concat_bias.run();
        _input_gate_bias->mark_as_unused();
        _forget_gate_bias->mark_as_unused();
        _cell_bias->mark_as_unused();
        _output_gate_bias->mark_as_unused();

        _is_prepared = true;
    }
}

} // namespace arm_compute