/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "support/ToolchainSupport.h"

#include <algorithm>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const ITensorInfo &output)
{
    if(is_data_type_quantized_asymmetric(input.data_type()))
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info(input.quantization_info().scale, -input.quantization_info().offset);
        const QuantizationInfo weights_quantization_info(weights.quantization_info().scale, -weights.quantization_info().offset);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input.clone()->set_quantization_info(input_quantization_info),
                                                                           &weights.clone()->set_quantization_info(weights_quantization_info),
                                                                           &output));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input, &weights, nullptr, &output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
    }

    return Status{};
}
} // namespace

void CLFullyConnectedLayerReshapeWeights::configure(const ICLTensor *input, ICLTensor *output)
{
    auto k = arm_compute::support::cpp14::make_unique<CLTransposeKernel>();
    k->configure(input, output);
    _kernel = std::move(k);
}

Status CLFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, const ITensorInfo *output)
{
    return CLTransposeKernel::validate(input, output);
}

CLFullyConnectedLayer::CLFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _convert_weights(), _flatten_layer(), _reshape_weights_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(),
      _accumulate_biases_kernel(), _flatten_output(), _gemmlowp_output(), _converted_weights_output(), _reshape_weights_output(), _are_weights_converted(true), _are_weights_reshaped(true),
      _is_fc_after_conv(true), _accumulate_biases(false), _is_quantized(false), _is_prepared(false), _original_weights(nullptr)
{
}

void CLFullyConnectedLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    if(_is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        // Configure gemmlowp function
        _mm_gemmlowp.configure(input, weights, output);

        // Restore the original QuantizationInfo, as input and weights could be used in other fully connected layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply kernel
        _mm_gemm.configure(input, weights, nullptr, output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */));
    }
}
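
// Note: GEMMInfo's third flag ("reshape weights only for the first run") lets
// the underlying GEMM reshape its weights once and cache the result; the
// one-off work is triggered from prepare() on the first call to run().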

void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized

    // Initialize output tensor for flatten
    TensorShape shape_flatten = compute_flatten_shape(input->info());
    _flatten_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten).set_data_layout(DataLayout::NCHW));

    // Configure flatten kernel
    _memory_group.manage(&_flatten_output);
    _flatten_layer.configure(input, &_flatten_output);

    // Configure matrix multiply kernel
    configure_mm(&_flatten_output, weights, output);

    // Allocate the output tensor for flatten once all the configure methods have been called
    _flatten_output.allocator()->allocate();
}
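
// Illustrative example (shapes are assumptions, not taken from the library):
// a convolution output of shape [W, H, C] = [7, 7, 512] is flattened to a
// vector of W * H * C = 25088 elements, so the (already reshaped) weights must
// have dimension(1) == 25088, i.e. a shape of [num_outputs, 25088], producing
// an output of num_outputs elements.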

void CLFullyConnectedLayer::configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(input, weights, output);
}

void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                      FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(CLFullyConnectedLayer::validate(input->info(),
                                                               weights->info(),
                                                               biases != nullptr ? biases->info() : nullptr,
                                                               output->info(),
                                                               fc_info));

    _are_weights_converted = true;
    _are_weights_reshaped  = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    _is_fc_after_conv      = true;
    _accumulate_biases     = false;
    _is_quantized          = is_data_type_quantized_asymmetric(input->info()->data_type());
    _is_prepared           = false;
    _original_weights      = weights;

    // Configure gemmlowp output
    if(_is_quantized)
    {
        _gemmlowp_output.allocator()->init(output->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
    }

    // Configure accumulate biases kernel for non-quantized types (for quantized asymmetric types the biases are applied in the GEMMLowp output stage)
    if(biases != nullptr && !_is_quantized)
    {
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);

        _accumulate_biases = true;

        // Configure accumulate biases kernel
        _accumulate_biases_kernel.set_target(CLScheduler::get().target());
        _accumulate_biases_kernel.configure(output, biases);
    }

    const ICLTensor *weights_to_use = weights;

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                                                                  input->info()->tensor_shape().cend(),
                                                                                  output->info()->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = input->info()->num_dimensions() > 1;
    }
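
    // Illustrative example (shapes are assumptions): with a batched input of
    // shape [7, 7, 512, 8] coming from a convolution layer and an output of
    // shape [num_outputs, 8], the input dimensions from index 3 onwards ([8])
    // match the output dimensions from index 1 onwards ([8]), so the input is
    // treated as a convolution output that still needs flattening.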

    // Reshape weights if needed
    if(!_are_weights_reshaped)
    {
        // Reshape the weights
        _reshape_weights_kernel.configure(weights, &_reshape_weights_output);
        weights_to_use = &_reshape_weights_output;
    }

    // Convert weights if needed
    if(_is_fc_after_conv && (input->info()->data_layout() != fc_info.weights_trained_layout))
    {
        // Convert weights
        _convert_weights.configure(weights_to_use,
                                   &_converted_weights_output,
                                   input->info()->tensor_shape(),
                                   fc_info.weights_trained_layout);

        weights_to_use         = &_converted_weights_output;
        _are_weights_converted = false;
    }

    // Configure fc core
    ICLTensor *tmp_output = (_is_quantized) ? &_gemmlowp_output : output;
    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(input, weights_to_use, tmp_output);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(input, weights_to_use, tmp_output);
    }

    // Configure output stage for asymmetric quantized types
    if(_is_quantized)
    {
        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
        _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, output->info()->quantization_info().offset);
        _gemmlowp_output.allocator()->allocate();
    }
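
    // Worked example (values are assumptions for illustration): with input
    // scale 0.5, weights scale 0.25 and output scale 1.0 the real multiplier
    // is 0.5 * 0.25 / 1.0 = 0.125. Written as m * 2^-shift with m in [0.5, 1),
    // this is 0.5 * 2^-2, so the fixed-point representation is
    // output_multiplier = 2^30 (0.5 in Q31) and output_shift = 2.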

    _are_weights_reshaped = _are_weights_reshaped || fc_info.retain_internal_weights;
}

Status CLFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                       FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);

    bool            weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool            is_fc_after_conv = true;
    bool            is_quantized     = is_data_type_quantized_asymmetric(input->data_type());
    const GPUTarget gpu_target       = CLScheduler::get().target();

    const ITensorInfo &flatten_input     = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(input)).set_data_layout(DataLayout::NCHW));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());
    const ITensorInfo &gemmlowp_output   = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));

    // Validate accumulate biases kernel for non-quantized types (for quantized asymmetric types the biases are applied in the GEMMLowp output stage)
    if(biases != nullptr && !is_quantized)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixAccumulateBiasesKernel::validate(output, biases, gpu_target));
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *input_to_use   = input;
    const ITensorInfo *weights_to_use = weights;
    const ITensorInfo *tmp_output     = (is_quantized) ? &gemmlowp_output : output;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
                                                                                 input->tensor_shape().cend(),
                                                                                 output->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = input->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (input->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLConvertFullyConnectedWeights::validate(weights_to_use,
                                                                             &converted_weights,
                                                                             input->tensor_shape(),
                                                                             fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));

        // Validate flatten kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLFlattenLayer::validate(input, &flatten_input));
        input_to_use = &flatten_input;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
    }

    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*input_to_use, *weights_to_use, *tmp_output));

    // Validate output stage for asymmetric quantized types
    if(is_quantized)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&gemmlowp_output, biases, output));
    }

    return Status{};
}
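
// Note: validate() mirrors configure() step by step on ITensorInfo objects
// only, so a network can be checked up front without allocating OpenCL
// buffers or running any kernels.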

void CLFullyConnectedLayer::run()
{
    prepare();

    _memory_group.acquire();

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        _flatten_layer.run();
    }

    // Run matrix multiply
    if(_is_quantized)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }

    // Accumulate biases if provided (for quantized types this is fused into the output stage)
    if(_is_quantized)
    {
        _gemmlowp_output_stage.run();
    }
    else
    {
        if(_accumulate_biases)
        {
            CLScheduler::get().enqueue(_accumulate_biases_kernel);
        }
    }

    _memory_group.release();
}

void CLFullyConnectedLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        auto release_unused = [](CLTensor * w)
        {
            if(!w->is_used())
            {
                CLScheduler::get().queue().finish();
                w->allocator()->free();
            }
        };

        // Pointer to current weights
        const ICLTensor *cur_weights = _original_weights;

        // Reshape of the weights if needed (happens only once)
        if(!_are_weights_reshaped)
        {
            // Run reshape weights kernel and mark weights as unused
            _reshape_weights_output.allocator()->allocate();
            _reshape_weights_kernel.run();

            cur_weights->mark_as_unused();
            cur_weights           = &_reshape_weights_output;
            _are_weights_reshaped = true;
        }

        // Convert weights if needed (happens only once)
        if(!_are_weights_converted)
        {
            _converted_weights_output.allocator()->allocate();
            _convert_weights.run();

            cur_weights->mark_as_unused();
            _are_weights_converted = true;
        }

        // Release reshaped weights if unused
        release_unused(&_reshape_weights_output);

        // Prepare GEMM; its prepare step may in turn mark the weights it was given as unused
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
        }

        // Release converted weights if unused; the reshaped weights are checked
        // again because _mm_gemm.prepare() may have marked them as unused
        release_unused(&_reshape_weights_output);
        release_unused(&_converted_weights_output);

        _is_prepared = true;
    }
}
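
// Illustrative usage sketch (not part of the library): names, shapes and the
// default-constructed FullyConnectedLayerInfo below are assumptions for the
// example, following the convention that the weights are stored as
// [inputs, outputs] and transposed by the function itself
// (fc_info.transpose_weights == true).
//
//   CLTensor src, weights, bias, dst;
//   src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
//   weights.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::F32));
//   bias.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
//   dst.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
//
//   CLFullyConnectedLayer fc;
//   fc.configure(&src, &weights, &bias, &dst);
//
//   src.allocator()->allocate();
//   weights.allocator()->allocate();
//   bias.allocator()->allocate();
//   dst.allocator()->allocate();
//   // ... fill src, weights and bias ...
//
//   fc.run(); // the first run() calls prepare(): the weights are reshaped
//             // (and converted, if needed) once and the originals released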