/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "support/ToolchainSupport.h"

#include <algorithm>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const ITensorInfo &output)
{
    if(is_data_type_quantized_asymmetric(input.data_type()))
    {
        // Since the matrix multiplication core needs negated offsets, we need to change the QuantizationInfo()
        // Extract and negate the input and weights offsets
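        // For asymmetric quantization, real_value = scale * (quantized_value - offset), so the
        // core has to subtract the offset from every element; passing the offsets negated lets
        // it apply them with a plain addition instead. Illustrative example (values assumed,
        // not taken from any real network): an input QuantizationInfo(0.5f, 10) is passed as
        // (0.5f, -10), so a quantized input value of 12 contributes 12 + (-10) = 2, i.e. the
        // real value 0.5f * 2 = 1.0f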
        const QuantizationInfo input_quantization_info(input.quantization_info().scale, -input.quantization_info().offset);
        const QuantizationInfo weights_quantization_info(weights.quantization_info().scale, -weights.quantization_info().offset);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input.clone()->set_quantization_info(input_quantization_info),
                                                                           &weights.clone()->set_quantization_info(weights_quantization_info),
                                                                           &output));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input, &weights, nullptr, &output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
    }

    return Status{};
}
} // namespace

void CLFullyConnectedLayerReshapeWeights::configure(const ICLTensor *input, ICLTensor *output)
{
    auto k = arm_compute::support::cpp14::make_unique<CLTransposeKernel>();
    k->configure(input, output);
    _kernel = std::move(k);
}

Status CLFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, const ITensorInfo *output)
{
    return CLTransposeKernel::validate(input, output);
}

CLFullyConnectedLayer::CLFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _im2col_kernel(), _reshape_weights_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(), _accumulate_biases_kernel(),
      _im2col_output(), _gemmlowp_output(), _reshape_weights_output(), _are_weights_reshaped(true), _is_fc_after_conv(true), _accumulate_biases(false), _is_quantized(false), _original_weights(nullptr)
{
}

void CLFullyConnectedLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    if(_is_quantized)
    {
        // Since the matrix multiplication core needs negated offsets, we need to change the QuantizationInfo()
        // Extract and negate the input and weights offsets
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        // Configure gemmlowp function
        _mm_gemmlowp.configure(input, weights, output);

        // Restore the original QuantizationInfo, as input and weights could be used in other fully connected layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply kernel
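        // Note: the three GEMMInfo flags are, in order, is_a_reshaped, is_b_reshaped and
        // reshape_b_only_on_first_run. Neither operand is passed in pre-reshaped here, but the
        // weights (matrix B) are constant, so their reshape can be done once and then reused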
        _mm_gemm.configure(input, weights, nullptr, output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */));
    }
}

void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized
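    // e.g. (shapes are illustrative) a convolution output of shape [W, H, C] = [7, 7, 512] is
    // flattened by im2col into a vector of W * H * C = 25088 elements, which matches the
    // second dimension of the transposed weights checked by the assertion above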

    // Initialize output tensor for im2col
    TensorShape shape_im2col = compute_im2col_fc_shape(input->info());
    _im2col_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col));

    // Configure im2col kernel
    _memory_group.manage(&_im2col_output);
    _im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false);
    CLScheduler::get().tune_kernel_static(_im2col_kernel);

    // Configure matrix multiply kernel
    configure_mm(&_im2col_output, weights, output);

    // Allocate the output tensor for im2col once all the configure methods have been called
    _im2col_output.allocator()->allocate();
}

void CLFullyConnectedLayer::configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(input, weights, output);
}

void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights, bool are_weights_reshaped,
                                      bool retain_internal_weights)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(CLFullyConnectedLayer::validate(input->info(),
                                                               weights->info(),
                                                               biases != nullptr ? biases->info() : nullptr,
                                                               output->info(),
                                                               transpose_weights,
                                                               are_weights_reshaped,
                                                               retain_internal_weights));

    _are_weights_reshaped = transpose_weights ? are_weights_reshaped : true;
    _is_fc_after_conv     = true;
    _accumulate_biases    = false;
    _is_quantized         = is_data_type_quantized_asymmetric(input->info()->data_type());
    _original_weights     = weights;

    // Configure gemmlowp output
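    // The GEMMLowp core multiplies 8-bit operands but accumulates into 32-bit integers, so an
    // intermediate S32 tensor is needed to hold the raw accumulators until the output stage
    // requantizes them back to QASYMM8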
    if(_is_quantized)
    {
        _gemmlowp_output.allocator()->init(output->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
    }

    // Configure accumulate biases kernel for types that are not quantized asymmetric
    if(biases != nullptr && !_is_quantized)
    {
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);

        _accumulate_biases = true;

        // Configure accumulate biases kernel
        _accumulate_biases_kernel.set_target(CLScheduler::get().target());
        _accumulate_biases_kernel.configure(output, biases);
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches
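    //
    // The case is deduced from the tensor shapes alone. As an illustrative example (shapes
    // assumed): a convolution output of shape [7, 7, 512, 4] feeding a fully connected layer
    // with output [4096, 4] is case 3, while a fully connected output of shape [4096, 4]
    // feeding another one with output [1000, 4] is case 4. The std::equal check below tests
    // that dimensions 3 and above of the input match dimensions 1 and above of the output,
    // i.e. that the batch dimensions line up as they would for a convolution input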

    const ICLTensor *weights_to_use = weights;

    if(!_are_weights_reshaped)
    {
        weights_to_use = &_reshape_weights_output;

        // Reshape the weights
        _reshape_weights_kernel.configure(weights, &_reshape_weights_output);
    }

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->info()->dimension(1) > 1;

    if(is_batched_fc_layer)
    {
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                                                                  input->info()->tensor_shape().cend(),
                                                                                  output->info()->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = input->info()->num_dimensions() > 1;
    }

    ICLTensor *tmp_output = (_is_quantized) ? &_gemmlowp_output : output;
    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(input, weights_to_use, tmp_output);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(input, weights_to_use, tmp_output);
    }

    // Configure output stage for asymmetric quantized types
    if(_is_quantized)
    {
        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
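        // The float multiplier is the ratio between the accumulator scale (input scale times
        // weights scale) and the output scale; calculate_quantized_multiplier_less_than_one()
        // decomposes it so that multiplier ~= output_multiplier * 2^-31 * 2^-output_shift.
        // Illustrative example (scales assumed): an input scale of 0.5 and weights scale of
        // 0.25 with an output scale of 1.0 give multiplier = 0.125 = 0.5 * 2^-2, i.e.
        // output_shift = 2 and output_multiplier = 2^30 (0.5 in Q0.31 fixed point)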
        _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, output->info()->quantization_info().offset);
        _gemmlowp_output.allocator()->allocate();
    }

    _are_weights_reshaped = _are_weights_reshaped || retain_internal_weights;
}

Status CLFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights, bool are_weights_reshaped,
                                       bool retain_internal_weights)
{
    ARM_COMPUTE_UNUSED(retain_internal_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);

    bool            weights_reshaped = transpose_weights ? are_weights_reshaped : true;
    bool            is_fc_after_conv = true;
    bool            is_quantized     = is_data_type_quantized_asymmetric(input->data_type());
    const GPUTarget gpu_target       = CLScheduler::get().target();

    const ITensorInfo &im2col_input     = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_im2col_fc_shape(input)));
    const ITensorInfo &reshaped_weights = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &gemmlowp_output  = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));

    // Validate accumulate biases kernel for types that are not quantized asymmetric
    if(biases != nullptr && !is_quantized)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixAccumulateBiasesKernel::validate(output, biases, gpu_target));
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *input_to_use   = input;
    const ITensorInfo *weights_to_use = weights;
    const ITensorInfo *tmp_output     = (is_quantized) ? &gemmlowp_output : output;

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->dimension(1) > 1;

    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
                                                                                 input->tensor_shape().cend(),
                                                                                 output->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = input->num_dimensions() > 1;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));

        // Validate im2col kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &im2col_input, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false));
        input_to_use = &im2col_input;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
    }
    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*input_to_use, *weights_to_use, *tmp_output));

    // Validate output stage for asymmetric quantized types
    if(is_quantized)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&gemmlowp_output, biases, output));
    }

    return Status{};
}

void CLFullyConnectedLayer::run()
{
    prepare();

    _memory_group.acquire();

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        CLScheduler::get().enqueue(_im2col_kernel, false);
    }

    // Run matrix multiply
    if(_is_quantized)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }

    // Accumulate biases if provided
    if(_is_quantized)
    {
        _gemmlowp_output_stage.run();
    }
    else
    {
        if(_accumulate_biases)
        {
            CLScheduler::get().enqueue(_accumulate_biases_kernel);
        }
    }

    _memory_group.release();
}

void CLFullyConnectedLayer::prepare()
{
    // Reshape of the weights (happens only once)
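    // The weights of a fully connected layer are constant across invocations, so the transpose
    // runs only on first use; once the reshaped copy exists, the original weights can be
    // released if nothing else holds on to them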
    if(!_are_weights_reshaped)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        // Run reshape weights kernel and mark weights as unused
        _reshape_weights_output.allocator()->allocate();
        _reshape_weights_kernel.run();
        _original_weights->mark_as_unused();

        // Prepare GEMM and release unused weights
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
            if(!_reshape_weights_output.is_used())
            {
                _reshape_weights_output.allocator()->free();
            }
        }

        CLScheduler::get().queue().finish();
        _are_weights_reshaped = true;
    }
}
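
// Usage sketch (illustrative only, not part of the library): a minimal F32 fully connected
// layer, assuming CLScheduler::get().default_init() has been called and relying on the
// default transpose_weights / are_weights_reshaped arguments; tensor names and shapes are
// made up for the example.
//
//     CLTensor src, weights, biases, dst;
//     src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
//     weights.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));
//     biases.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));
//     dst.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));
//
//     CLFullyConnectedLayer fc;
//     fc.configure(&src, &weights, &biases, &dst);
//
//     src.allocator()->allocate();
//     weights.allocator()->allocate();
//     biases.allocator()->allocate();
//     dst.allocator()->allocate();
//     // ... map the tensors and fill src, weights and biases ...
//     fc.run();
//     CLScheduler::get().sync();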