/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "support/ToolchainSupport.h"

#include <algorithm>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

namespace
{
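// Validates the matrix multiply step of the layer: the GEMMLowp core is used
// for asymmetric quantized inputs, the regular CLGEMM otherwise.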
Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const ITensorInfo &output)
{
    if(is_data_type_quantized_asymmetric(input.data_type()))
    {
        // Since we need negative offsets for the matrix multiply core, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info(input.quantization_info().scale, -input.quantization_info().offset);
        const QuantizationInfo weights_quantization_info(weights.quantization_info().scale, -weights.quantization_info().offset);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input.clone()->set_quantization_info(input_quantization_info),
                                                                           &weights.clone()->set_quantization_info(weights_quantization_info),
                                                                           &output));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input, &weights, nullptr, &output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
    }

    return Status{};
}
} // namespace

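// Reshaping the weights of a fully connected layer is a plain 2D transpose, so
// this function only wraps CLTransposeKernel.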
void CLFullyConnectedLayerReshapeWeights::configure(const ICLTensor *input, ICLTensor *output)
{
    auto k = arm_compute::support::cpp14::make_unique<CLTransposeKernel>();
    k->configure(input, output);
    _kernel = std::move(k);
}

Status CLFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, const ITensorInfo *output)
{
    return CLTransposeKernel::validate(input, output);
}

CLFullyConnectedLayer::CLFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _im2col_kernel(), _convert_weights(), _reshape_weights_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(),
      _accumulate_biases_kernel(), _im2col_output(), _gemmlowp_output(), _converted_weights_output(), _reshape_weights_output(), _are_weights_converted(true), _are_weights_reshaped(true),
      _is_fc_after_conv(true), _accumulate_biases(false), _is_quantized(false), _is_prepared(false), _original_weights(nullptr)
{
}

void CLFullyConnectedLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    if(_is_quantized)
    {
        // Since we need negative offsets for the matrix multiply core, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        // Configure gemmlowp function
        _mm_gemmlowp.configure(input, weights, output);

        // Revert back QuantizationInfo as input and weights could be used in other fully connected layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply kernel
        _mm_gemm.configure(input, weights, nullptr, output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */));
    }
}

void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized

    // Initialize output tensor for im2col
    TensorShape shape_im2col = compute_im2col_fc_shape(input->info());
    _im2col_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col).set_data_layout(DataLayout::NCHW));

    // Configure im2col kernel
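    // With a 1x1 kernel and unit stride, im2col degenerates to flattening each
    // (W, H, C) input volume into one row of the matrix multiply input.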
    _memory_group.manage(&_im2col_output);
    _im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false);
    CLScheduler::get().tune_kernel_static(_im2col_kernel);

    // Configure matrix multiply kernel
    configure_mm(&_im2col_output, weights, output);

    // Allocate the output tensor for im2col once all the configure methods have been called
    _im2col_output.allocator()->allocate();
}

void CLFullyConnectedLayer::configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(input, weights, output);
}

void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                      FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(CLFullyConnectedLayer::validate(input->info(),
                                                               weights->info(),
                                                               biases != nullptr ? biases->info() : nullptr,
                                                               output->info(),
                                                               fc_info));

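    // Initialize bookkeeping flags. If the weights are not to be transposed they
    // are treated as already reshaped; otherwise honour the caller's
    // are_weights_reshaped hint.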
    _are_weights_converted = true;
    _are_weights_reshaped  = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    _is_fc_after_conv      = true;
    _accumulate_biases     = false;
    _is_quantized          = is_data_type_quantized_asymmetric(input->info()->data_type());
    _is_prepared           = false;
    _original_weights      = weights;

    // Configure gemmlowp output
    if(_is_quantized)
    {
        _gemmlowp_output.allocator()->init(output->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
    }

    // Configure accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !_is_quantized)
    {
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);

        _accumulate_biases = true;

        // Configure accumulate biases kernel
        _accumulate_biases_kernel.set_target(CLScheduler::get().target());
        _accumulate_biases_kernel.configure(output, biases);
    }

    const ICLTensor *weights_to_use = weights;

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
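    // In the batched case the layer follows a convolution only if the input's
    // dimensions from index 3 onwards match the output's dimensions from index 1
    // onwards, i.e. the batch dimensions line up once W, H and C are flattened.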
    if(is_batched_fc_layer)
    {
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                                                                  input->info()->tensor_shape().cend(),
                                                                                  output->info()->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = input->info()->num_dimensions() > 1;
    }

    // Reshape weights if needed
    if(!_are_weights_reshaped)
    {
        // Reshape the weights
        _reshape_weights_kernel.configure(weights, &_reshape_weights_output);
        weights_to_use = &_reshape_weights_output;
    }

    // Convert weights if needed
    if(_is_fc_after_conv && (input->info()->data_layout() != fc_info.weights_trained_layout))
    {
        // Convert weights
        _convert_weights.configure(weights_to_use,
                                   &_converted_weights_output,
                                   input->info()->tensor_shape(),
                                   fc_info.weights_trained_layout);

        weights_to_use         = &_converted_weights_output;
        _are_weights_converted = false;
    }

    // Configure fc core
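    // For quantized types the matrix multiply produces S32 accumulators in
    // _gemmlowp_output; the output stage configured below requantizes them to
    // the final QASYMM8 output.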
    ICLTensor *tmp_output = (_is_quantized) ? &_gemmlowp_output : output;
    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(input, weights_to_use, tmp_output);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(input, weights_to_use, tmp_output);
    }

    // Configure output stage for asymmetric quantized types
    if(_is_quantized)
    {
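        // The real requantization multiplier is input_scale * weights_scale / output_scale;
        // it is decomposed into a normalized fixed-point multiplier and a right shift.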
        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
        _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, output->info()->quantization_info().offset);
        _gemmlowp_output.allocator()->allocate();
    }

    _are_weights_reshaped = _are_weights_reshaped || fc_info.retain_internal_weights;
}

Status CLFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                       FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);

    bool            weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool            is_fc_after_conv = true;
    bool            is_quantized     = is_data_type_quantized_asymmetric(input->data_type());
    const GPUTarget gpu_target       = CLScheduler::get().target();

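    // Model the would-be intermediate tensors as TensorInfo descriptors so each
    // sub-function can be validated without allocating any memory.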
    const ITensorInfo &im2col_input      = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_im2col_fc_shape(input)).set_data_layout(DataLayout::NCHW));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());
    const ITensorInfo &gemmlowp_output   = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));

    // Configure accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !is_quantized)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixAccumulateBiasesKernel::validate(output, biases, gpu_target));
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *input_to_use   = input;
    const ITensorInfo *weights_to_use = weights;
    const ITensorInfo *tmp_output     = (is_quantized) ? &gemmlowp_output : output;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
                                                                                 input->tensor_shape().cend(),
                                                                                 output->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = input->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (input->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLConvertFullyConnectedWeights::validate(weights_to_use,
                                                                             &converted_weights,
                                                                             input->tensor_shape(),
                                                                             fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));

        // Validate im2col kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &im2col_input, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false));
        input_to_use = &im2col_input;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
    }
    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*input_to_use, *weights_to_use, *tmp_output));

    // Validate output stage for asymmetric quantized types
    if(is_quantized)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&gemmlowp_output, biases, output));
    }

    return Status{};
}

void CLFullyConnectedLayer::run()
{
    prepare();

    _memory_group.acquire();

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        CLScheduler::get().enqueue(_im2col_kernel, false);
    }

    // Run matrix multiply
    if(_is_quantized)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }

    // Accumulate biases if provided
    if(_is_quantized)
    {
        _gemmlowp_output_stage.run();
    }
    else
    {
        if(_accumulate_biases)
        {
            CLScheduler::get().enqueue(_accumulate_biases_kernel);
        }
    }

    _memory_group.release();
}

void CLFullyConnectedLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

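        // Frees the backing memory of a temporary weights tensor once nothing
        // references it any more; the queue is drained first so that no
        // in-flight kernel is still reading the buffer.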
        auto release_unused = [](CLTensor * w)
        {
            if(!w->is_used())
            {
                CLScheduler::get().queue().finish();
                w->allocator()->free();
            }
        };

        // Pointer to current weights
        const ICLTensor *cur_weights = _original_weights;

        // Reshape of the weights if needed (happens only once)
        if(!_are_weights_reshaped)
        {
            // Run reshape weights kernel and mark weights as unused
            _reshape_weights_output.allocator()->allocate();
            _reshape_weights_kernel.run();

            cur_weights->mark_as_unused();
            cur_weights           = &_reshape_weights_output;
            _are_weights_reshaped = true;
        }

        // Convert weights if needed (happens only once)
        if(!_are_weights_converted)
        {
            _converted_weights_output.allocator()->allocate();
            _convert_weights.run();

            cur_weights->mark_as_unused();
            _are_weights_converted = true;
        }

        // Release reshaped weights if unused
        release_unused(&_reshape_weights_output);

        // Prepare GEMM and release unused weights
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
        }

        // Release reshaped and converted weights if unused; GEMM prepare may
        // have just marked the reshaped weights as unused
        release_unused(&_reshape_weights_output);
        release_unused(&_converted_weights_output);

        _is_prepared = true;
    }
}
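
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of this translation unit): a
// minimal sketch of driving CLFullyConnectedLayer for a 128-input / 10-output
// layer with a default FullyConnectedLayerInfo. Shapes, tensor names and the
// fill step are hypothetical.
//
//   CLScheduler::get().default_init();
//
//   CLTensor src, weights, bias, dst;
//   src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
//   weights.allocator()->init(TensorInfo(TensorShape(128U, 10U), 1, DataType::F32));
//   bias.allocator()->init(TensorInfo(TensorShape(10U), 1, DataType::F32));
//   dst.allocator()->init(TensorInfo(TensorShape(10U), 1, DataType::F32));
//
//   CLFullyConnectedLayer fc;
//   fc.configure(&src, &weights, &bias, &dst, FullyConnectedLayerInfo());
//
//   src.allocator()->allocate();
//   weights.allocator()->allocate();
//   bias.allocator()->allocate();
//   dst.allocator()->allocate();
//
//   // ... fill src, weights and bias, e.g. via map()/unmap() ...
//
//   fc.run();                  // matrix multiply + bias accumulation
//   CLScheduler::get().sync(); // wait for the OpenCL queue to finish
// ---------------------------------------------------------------------------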