/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "support/ToolchainSupport.h"

#include <algorithm>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const ITensorInfo &output)
{
    if(is_data_type_quantized_asymmetric(input.data_type()))
    {
        // Since we need negative offsets for computing the matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
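        // Note: in the asymmetric quantization scheme a real value is r = scale * (q - offset),
        // so the zero points enter the product with a negative sign; cloning the TensorInfos
        // with negated offsets lets the GEMMLowp core apply them with the sign it expects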
        const QuantizationInfo input_quantization_info(input.quantization_info().scale, -input.quantization_info().offset);
        const QuantizationInfo weights_quantization_info(weights.quantization_info().scale, -weights.quantization_info().offset);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input.clone()->set_quantization_info(input_quantization_info),
                                                                           &weights.clone()->set_quantization_info(weights_quantization_info),
                                                                           nullptr,
                                                                           &output));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input, &weights, nullptr, &output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
    }

    return Status{};
}
} // namespace

void CLFullyConnectedLayerReshapeWeights::configure(const ICLTensor *input, ICLTensor *output)
{
    auto k = arm_compute::support::cpp14::make_unique<CLTransposeKernel>();
    k->configure(input, output);
    _kernel = std::move(k);
}

Status CLFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, const ITensorInfo *output)
{
    return CLTransposeKernel::validate(input, output);
}

CLFullyConnectedLayer::CLFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(memory_manager), _convert_weights(), _flatten_layer(), _reshape_weights_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(),
      _accumulate_biases_kernel(), _flatten_output(), _gemmlowp_output(), _converted_weights_output(), _reshape_weights_output(), _are_weights_converted(true), _are_weights_reshaped(true),
      _is_fc_after_conv(true), _accumulate_biases(false), _is_quantized(false), _is_prepared(false), _original_weights(nullptr)
{
}

void CLFullyConnectedLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool retain_internal_weights)
{
    if(_is_quantized)
    {
        // Since we need negative offsets for computing the matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));

        // Configure gemmlowp function
        _mm_gemmlowp.configure(input, weights, nullptr, output);

        // Revert the QuantizationInfo as input and weights could be used in other fully connected layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply kernel
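        // The GEMMInfo below requests that the weights (matrix B) be reshaped only on the
        // first run and, via the last argument, optionally retained across reconfigurations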
        _mm_gemm.configure(input, weights, nullptr, output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */, 0, false, retain_internal_weights));
    }
}

void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool retain_internal_weights)
{
    ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized
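    // e.g. (illustrative) a [7, 7, 256] convolution output is flattened to a 12544-element
    // vector, so weights->info()->dimension(1) must equal 7 * 7 * 256 = 12544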

    // Initialize output tensor for flatten
    TensorShape shape_flatten = compute_flatten_shape(input->info());
    _flatten_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten).set_data_layout(DataLayout::NCHW));

    // Configure flatten kernel
    _memory_group.manage(&_flatten_output);
    _flatten_layer.configure(input, &_flatten_output);

    // Configure matrix multiply kernel
    configure_mm(&_flatten_output, weights, output, retain_internal_weights);

    // Allocate the output tensor for flatten once all the configure methods have been called
    _flatten_output.allocator()->allocate();
}

void CLFullyConnectedLayer::configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool retain_internal_weights)
{
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(input, weights, output, retain_internal_weights);
}

void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                      FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(CLFullyConnectedLayer::validate(input->info(),
                                                               weights->info(),
                                                               biases != nullptr ? biases->info() : nullptr,
                                                               output->info(),
                                                               fc_info));

    _are_weights_converted = true;
    _are_weights_reshaped  = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    _is_fc_after_conv      = true;
    _accumulate_biases     = false;
    _is_quantized          = is_data_type_quantized_asymmetric(input->info()->data_type());
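    // If the caller retains the internal weights across reconfigurations, prepare() has nothing to do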
    _is_prepared           = fc_info.retain_internal_weights;
    _original_weights      = weights;

    // Configure gemmlowp output
    if(_is_quantized)
    {
        _gemmlowp_output.allocator()->init(output->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
    }

    // Configure accumulate biases kernel for non-quantized types
    if(biases != nullptr && !_is_quantized)
    {
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);

        _accumulate_biases = true;

        // Configure accumulate biases kernel
        _accumulate_biases_kernel.set_target(CLScheduler::get().target());
        _accumulate_biases_kernel.configure(output, biases);
    }

    const ICLTensor *weights_to_use = weights;

    // With the Fully Connected layer we can have 4 different cases:
    // 1) Convolution layer -> Fully Connected layer without batches
    // 2) Fully Connected layer -> Fully Connected layer without batches
    // 3) Convolution layer -> Fully Connected layer with batches
    // 4) Fully Connected layer -> Fully Connected layer with batches
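    // e.g. (illustrative shapes) 1) [7, 7, 256] -> [1000]       2) [4096] -> [1000]
    //                            3) [7, 7, 256, 8] -> [1000, 8]  4) [4096, 8] -> [1000, 8]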

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
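        // The layer follows a convolution if the input dimensions from the fourth onwards
        // match the output dimensions from the second onwards, i.e. only the leading
        // [width, height, channels] block of the input gets flattened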
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                                                                  input->info()->tensor_shape().cend(),
                                                                                  output->info()->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = input->info()->num_dimensions() > 1;
    }

    // Reshape weights if needed
    if(!_are_weights_reshaped)
    {
        // Reshape the weights
        _reshape_weights_kernel.configure(weights, &_reshape_weights_output);
        weights_to_use = &_reshape_weights_output;
    }

    // Convert weights if needed
    if(_is_fc_after_conv && (input->info()->data_layout() != fc_info.weights_trained_layout))
    {
        // Convert weights
        _convert_weights.configure(weights_to_use,
                                   &_converted_weights_output,
                                   input->info()->tensor_shape(),
                                   fc_info.weights_trained_layout);

        weights_to_use         = &_converted_weights_output;
        _are_weights_converted = false;
    }

    // Configure fc core
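    // In the quantized case the GEMMLowp result is accumulated in S32 into _gemmlowp_output
    // and requantized to the output type by the output stage configured below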
    ICLTensor *tmp_output = (_is_quantized) ? &_gemmlowp_output : output;
    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(input, weights_to_use, tmp_output, fc_info.retain_internal_weights);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(input, weights_to_use, tmp_output, fc_info.retain_internal_weights);
    }

    // Configure output stage for asymmetric quantized types
    if(_is_quantized)
    {
        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
        int   output_multiplier, output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
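        // The real multiplier is decomposed into a normalized fixed-point multiplier and a
        // right shift, e.g. (illustrative) 0.125 becomes 0.5 in Q0.31 (output_multiplier = 1 << 30)
        // with output_shift = 2, since 0.5 * 2^-2 = 0.125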
        _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, output->info()->quantization_info().offset);
        _gemmlowp_output.allocator()->allocate();
    }
}

Status CLFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                       FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);

    bool            weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool            is_fc_after_conv = true;
    bool            is_quantized     = is_data_type_quantized_asymmetric(input->data_type());
    const GPUTarget gpu_target       = CLScheduler::get().target();

    const ITensorInfo &flatten_input     = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(input)).set_data_layout(DataLayout::NCHW));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());
    const ITensorInfo &gemmlowp_output   = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));

    // Validate accumulate biases kernel for non-quantized types
    if(biases != nullptr && !is_quantized)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixAccumulateBiasesKernel::validate(output, biases, gpu_target));
    }

    // With the Fully Connected layer we can have 4 different cases:
    // 1) Convolution layer -> Fully Connected layer without batches
    // 2) Fully Connected layer -> Fully Connected layer without batches
    // 3) Convolution layer -> Fully Connected layer with batches
    // 4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *input_to_use   = input;
    const ITensorInfo *weights_to_use = weights;
    const ITensorInfo *tmp_output     = (is_quantized) ? &gemmlowp_output : output;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
                                                                                 input->tensor_shape().cend(),
                                                                                 output->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = input->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (input->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLConvertFullyConnectedWeights::validate(weights_to_use,
                                                                             &converted_weights,
                                                                             input->tensor_shape(),
                                                                             fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));

        // Validate flatten kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLFlattenLayer::validate(input, &flatten_input));
        input_to_use = &flatten_input;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
    }

    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*input_to_use, *weights_to_use, *tmp_output));

    // Validate output stage for asymmetric quantized types
    if(is_quantized)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&gemmlowp_output, biases, output));
    }

    return Status{};
}

void CLFullyConnectedLayer::run()
{
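    // Perform any one-off weight reshape/conversion before the first run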
    prepare();

    _memory_group.acquire();

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        _flatten_layer.run();
    }

    // Run matrix multiply
    if(_is_quantized)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }

    // Accumulate biases if provided
    if(_is_quantized)
    {
        _gemmlowp_output_stage.run();
    }
    else
    {
        if(_accumulate_biases)
        {
            CLScheduler::get().enqueue(_accumulate_biases_kernel);
        }
    }

    _memory_group.release();
}

void CLFullyConnectedLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

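        // Free a weights tensor's backing memory once nothing uses it any longer; the queue
        // finish ensures any kernel still reading it has completed before the free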
        auto release_unused = [](CLTensor * w)
        {
            if(!w->is_used())
            {
                CLScheduler::get().queue().finish();
                w->allocator()->free();
            }
        };

        // Pointer to current weights
        const ICLTensor *cur_weights = _original_weights;

        // Reshape of the weights if needed (happens only once)
        if(!_are_weights_reshaped)
        {
            // Run reshape weights kernel and mark weights as unused
            _reshape_weights_output.allocator()->allocate();
            _reshape_weights_kernel.run();

            cur_weights->mark_as_unused();
            cur_weights           = &_reshape_weights_output;
            _are_weights_reshaped = true;
        }

        // Convert weights if needed (happens only once)
        if(!_are_weights_converted)
        {
            _converted_weights_output.allocator()->allocate();
            _convert_weights.run();

            cur_weights->mark_as_unused();
            _are_weights_converted = true;
        }

        // Release reshaped weights if unused
        release_unused(&_reshape_weights_output);

        // Prepare GEMM and release its unused weights
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
        }

        // Release reshaped and converted weights if unused
        release_unused(&_reshape_weights_output);
        release_unused(&_converted_weights_output);

        _is_prepared = true;
    }
}