/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "support/ToolchainSupport.h"

#include <algorithm>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const ITensorInfo &output, bool is_interleaved_transposed)
{
    const GPUTarget gpu_target = CLScheduler::get().target();

    if(is_data_type_quantized_asymmetric(input.data_type()))
    {
        // Since we need negative offsets for the quantized matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info(input.quantization_info().scale, -input.quantization_info().offset);
        const QuantizationInfo weights_quantization_info(weights.quantization_info().scale, -weights.quantization_info().offset);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input.clone()->set_quantization_info(input_quantization_info),
                                                                           &weights.clone()->set_quantization_info(weights_quantization_info),
                                                                           &output));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixMultiplyKernel::validate(&input, &weights, &output, 1.f, is_interleaved_transposed, GEMMReshapeInfo(), gpu_target));
    }

    return Status{};
}
} // namespace
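
// Note on the negated offsets in validate_mm() above and configure_mm() below: with the
// asymmetric quantization convention real_value = scale * (quantized_value - offset), the
// GEMMLowp offset-contribution step adds terms of the form (quantized_value + offset), so
// the input and weights offsets are handed over with their sign flipped (see
// CLGEMMLowpMatrixMultiplyCore for the underlying convention).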

void CLFullyConnectedLayerReshapeWeights::configure(const ICLTensor *input, ICLTensor *output)
{
    auto k = arm_compute::support::cpp14::make_unique<CLTransposeKernel>();
    k->configure(input, output);
    _kernel = std::move(k);
}

Status CLFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, const ITensorInfo *output)
{
    return CLTransposeKernel::validate(input, output);
}

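// Typical usage (illustrative sketch only; tensor initialisation, allocation and data
// transfer are omitted):
//
//   CLTensor src, weights, bias, dst;
//   CLFullyConnectedLayer fc;
//   fc.configure(&src, &weights, &bias, &dst, true /* transpose_weights */, false /* are_weights_reshaped */);
//   // ... allocate the tensors and fill src/weights/bias ...
//   fc.run();
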
CLFullyConnectedLayer::CLFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _im2col_kernel(), _reshape_weights_kernel(), _mm_kernel(), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _accumulate_biases_kernel(), _im2col_output(),
      _gemmlowp_output(), _reshape_weights_output(), _are_weights_reshaped(true), _is_fc_after_conv(true), _accumulate_biases(false), _is_quantized(false), _original_weights(nullptr)
{
}

void CLFullyConnectedLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool is_interleaved_transposed)
{
    if(_is_quantized)
    {
        // Since we need negative offsets for the quantized matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        // Configure gemmlowp function
        _mm_gemmlowp.configure(input, weights, output);

        // Revert back QuantizationInfo as input and weights could be used in other fully connected layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply kernel
        _mm_kernel.set_target(CLScheduler::get().target());
        _mm_kernel.configure(input, weights, output, 1.f, is_interleaved_transposed);
    }
}

void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized
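    // With a 1x1 patch, unit stride and no padding, the im2col below effectively flattens
    // the W x H x C dimensions of each batch item into a single dimension (see
    // compute_im2col_fc_shape), so the subsequent multiply is a plain GEMM.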

    // Initialize output tensor for im2col
    TensorShape shape_im2col = compute_im2col_fc_shape(input->info());
    _im2col_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col));

    // Configure im2col kernel
    _memory_group.manage(&_im2col_output);
    _im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false);

    // Configure matrix multiply kernel
    configure_mm(&_im2col_output, weights, output, false);

    // Allocate the output tensor for im2col once all the configure methods have been called
    _im2col_output.allocator()->allocate();
}

void CLFullyConnectedLayer::configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(input, weights, output, false);
}

void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights, bool are_weights_reshaped)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(CLFullyConnectedLayer::validate(input->info(),
                                                               weights->info(),
                                                               biases != nullptr ? biases->info() : nullptr,
                                                               output->info(),
                                                               transpose_weights,
                                                               are_weights_reshaped));

    _are_weights_reshaped = transpose_weights ? are_weights_reshaped : true;
    _is_fc_after_conv     = true;
    _accumulate_biases    = false;
    _is_quantized         = is_data_type_quantized_asymmetric(input->info()->data_type());
    _original_weights     = weights;

    // Configure gemmlowp output
    if(_is_quantized)
    {
        _gemmlowp_output.allocator()->init(output->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
    }

    // Configure accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !_is_quantized)
    {
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);

        _accumulate_biases = true;

        // Configure accumulate biases kernel
        _accumulate_biases_kernel.set_target(CLScheduler::get().target());
        _accumulate_biases_kernel.configure(output, biases);
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches
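    //
    // The batched cases are detected from the output's second dimension; the "after
    // convolution" cases are detected from the input shape, by matching the input's
    // trailing dimensions against the output's batch dimensions (batched runs) or by
    // treating any multi-dimensional input as convolution output (non-batched runs).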

    const ICLTensor *weights_to_use = weights;

    if(!_are_weights_reshaped)
    {
        weights_to_use = &_reshape_weights_output;

        // Reshape the weights
        _reshape_weights_kernel.configure(weights, &_reshape_weights_output);
    }

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->info()->dimension(1) > 1;

    if(is_batched_fc_layer)
    {
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                                                                  input->info()->tensor_shape().cend(),
                                                                                  output->info()->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = input->info()->num_dimensions() > 1;
    }

    ICLTensor *tmp_output = (_is_quantized) ? &_gemmlowp_output : output;
    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(input, weights_to_use, tmp_output);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(input, weights_to_use, tmp_output);
    }

    // Configure output stage for asymmetric quantized types
    if(_is_quantized)
    {
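        // Requantize the S32 GEMMLowp result back to QASYMM8: the real multiplier
        // (input_scale * weights_scale) / output_scale is decomposed into a fixed-point
        // multiplier in [0.5, 1) and a right shift, e.g. 0.125 -> 0.5 * 2^-2.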
        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
        _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, output->info()->quantization_info().offset);
        _gemmlowp_output.allocator()->allocate();
    }

    // Allocate the transpose tensor if the are_weights_reshaped flag is false and once all the configure methods have been called
    if(!_are_weights_reshaped)
    {
        // Allocate the tensor for the weights reshaped
        _reshape_weights_output.allocator()->allocate();
    }
}

Status CLFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights, bool are_weights_reshaped)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);

    bool            weights_reshaped = transpose_weights ? are_weights_reshaped : true;
    bool            is_fc_after_conv = true;
    bool            is_quantized     = is_data_type_quantized_asymmetric(input->data_type());
    const GPUTarget gpu_target       = CLScheduler::get().target();

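    // The TensorInfo objects below stand in for the intermediate tensors that configure()
    // would create (im2col output, reshaped weights, S32 gemmlowp output), so each kernel's
    // validate() can be exercised without allocating anything.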
    const ITensorInfo &im2col_input     = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_im2col_fc_shape(input)));
    const ITensorInfo &reshaped_weights = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &gemmlowp_output  = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));

    // Configure accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !is_quantized)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixAccumulateBiasesKernel::validate(output, biases, gpu_target));
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *input_to_use   = input;
    const ITensorInfo *weights_to_use = weights;
    const ITensorInfo *tmp_output     = (is_quantized) ? &gemmlowp_output : output;

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->dimension(1) > 1;

    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
                                                                                 input->tensor_shape().cend(),
                                                                                 output->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = input->num_dimensions() > 1;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));

        // Validate im2col kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &im2col_input, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false));
        input_to_use = &im2col_input;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
    }

    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*input_to_use, *weights_to_use, *tmp_output, false));

    // Validate output stage for asymmetric quantized types
    if(is_quantized)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&gemmlowp_output, biases, output));
    }

    return Status{};
}

void CLFullyConnectedLayer::run()
{
    // Reshape of the weights (happens only once)
    if(!_are_weights_reshaped)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        _are_weights_reshaped = true;
        _reshape_weights_kernel.run();

        // Mark original weights tensor as unused
        _original_weights->mark_as_unused();
    }

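    // Acquire the memory-managed intermediate tensors (e.g. the im2col output) for the
    // duration of this run; they are released again at the end so that a shared memory
    // manager can reuse the backing memory across functions.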
    _memory_group.acquire();

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        CLScheduler::get().enqueue(_im2col_kernel, false);
    }

    // Run matrix multiply
    if(_is_quantized)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        CLScheduler::get().enqueue(_mm_kernel, !_accumulate_biases);
    }

    // Accumulate biases if provided
    if(_is_quantized)
    {
        _gemmlowp_output_stage.run();
    }
    else
    {
        if(_accumulate_biases)
        {
            CLScheduler::get().enqueue(_accumulate_biases_kernel);
        }
    }

    _memory_group.release();
}