/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "support/ToolchainSupport.h"

#include <algorithm>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const ITensorInfo &output)
{
    if(is_data_type_quantized_asymmetric(input.data_type()))
    {
        const UniformQuantizationInfo iq_info = input.quantization_info().uniform();
        const UniformQuantizationInfo wq_info = weights.quantization_info().uniform();

        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info(iq_info.scale, -iq_info.offset);
        const QuantizationInfo weights_quantization_info(wq_info.scale, -wq_info.offset);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input.clone()->set_quantization_info(input_quantization_info),
                                                                           &weights.clone()->set_quantization_info(weights_quantization_info),
                                                                           nullptr,
                                                                           &output));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input, &weights, nullptr, &output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
    }

    return Status{};
}
} // namespace

void CLFullyConnectedLayerReshapeWeights::configure(const ICLTensor *input, ICLTensor *output)
{
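    // Reshaping the weights of a fully connected layer is a plain transpose, so this simply wraps CLTransposeKernel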
    auto k = arm_compute::support::cpp14::make_unique<CLTransposeKernel>();
    k->configure(input, output);
    _kernel = std::move(k);
}

Status CLFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, const ITensorInfo *output)
{
    return CLTransposeKernel::validate(input, output);
}

CLFullyConnectedLayer::CLFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
    : _memory_group(memory_manager), _convert_weights(), _flatten_layer(), _reshape_weights_kernel(), _mm_gemm(memory_manager), _mm_gemmlowp(memory_manager), _gemmlowp_output_stage(),
      _accumulate_biases_kernel(), _flatten_output(), _gemmlowp_output(), _converted_weights_output(), _reshape_weights_output(), _are_weights_converted(true), _are_weights_reshaped(true),
      _is_fc_after_conv(true), _accumulate_biases(false), _is_quantized(false), _is_prepared(false), _original_weights(nullptr)
{
}

void CLFullyConnectedLayer::configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool retain_internal_weights)
{
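    // Quantized asymmetric inputs are multiplied with CLGEMMLowpMatrixMultiplyCore, all other data types with CLGEMM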
    if(_is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        // Configure gemmlowp function
        _mm_gemmlowp.configure(input, weights, nullptr, output);

        // Revert back the QuantizationInfo as input and weights could be used in other fully connected layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply kernel
        _mm_gemm.configure(input, weights, nullptr, output, 1.f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */, 0, false, retain_internal_weights));
    }
}

void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool retain_internal_weights)
{
    ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized

    // Initialize output tensor for flatten
    TensorShape shape_flatten = compute_flatten_shape(input->info());
    _flatten_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten).set_data_layout(DataLayout::NCHW));

    // Configure flatten kernel
    _memory_group.manage(&_flatten_output);
    _flatten_layer.configure(input, &_flatten_output);

    // Configure matrix multiply kernel
    configure_mm(&_flatten_output, weights, output, retain_internal_weights);

    // Allocate the output tensor for flatten once all the configure methods have been called
    _flatten_output.allocator()->allocate();
}

void CLFullyConnectedLayer::configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, bool retain_internal_weights)
{
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(input, weights, output, retain_internal_weights);
}

void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                      FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(CLFullyConnectedLayer::validate(input->info(),
                                                               weights->info(),
                                                               biases != nullptr ? biases->info() : nullptr,
                                                               output->info(),
                                                               fc_info));

    _are_weights_converted = true;
    _are_weights_reshaped  = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    _is_fc_after_conv      = true;
    _accumulate_biases     = false;
    _is_quantized          = is_data_type_quantized_asymmetric(input->info()->data_type());
    _is_prepared           = fc_info.retain_internal_weights;
    _original_weights      = weights;

    // Configure gemmlowp output
    if(_is_quantized)
    {
        _gemmlowp_output.allocator()->init(output->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
    }

    // Configure accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !_is_quantized)
    {
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);

        _accumulate_biases = true;

        // Configure accumulate biases kernel
        _accumulate_biases_kernel.set_target(CLScheduler::get().target());
        _accumulate_biases_kernel.configure(output, biases);
    }

    const ICLTensor *weights_to_use = weights;

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
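    // The input is assumed to come from a convolution when, for batched cases, the input shape from dimension 3 onwards
    // matches the output shape from dimension 1 onwards; without batches, any multi-dimensional input is treated as such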
    if(is_batched_fc_layer)
    {
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                                                                  input->info()->tensor_shape().cend(),
                                                                                  output->info()->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = input->info()->num_dimensions() > 1;
    }

    // Reshape weights if needed
    if(!_are_weights_reshaped)
    {
        // Reshape the weights
        _reshape_weights_kernel.configure(weights, &_reshape_weights_output);
        weights_to_use = &_reshape_weights_output;
    }

    // Convert weights if needed
    if(_is_fc_after_conv && (input->info()->data_layout() != fc_info.weights_trained_layout))
    {
        // Convert weights
        _convert_weights.configure(weights_to_use,
                                   &_converted_weights_output,
                                   input->info()->tensor_shape(),
                                   fc_info.weights_trained_layout);

        weights_to_use         = &_converted_weights_output;
        _are_weights_converted = false;
    }

    // Configure fc core
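    // In the quantized case the GEMMLowp core writes S32 accumulators into _gemmlowp_output; the output stage configured below requantizes them into the final output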
    ICLTensor *tmp_output = (_is_quantized) ? &_gemmlowp_output : output;
    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(input, weights_to_use, tmp_output, fc_info.retain_internal_weights);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(input, weights_to_use, tmp_output, fc_info.retain_internal_weights);
    }

    // Configure output stage for asymmetric quantized types
    if(_is_quantized)
    {
        const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
        const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
        const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();

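        // Express the effective scale (input_scale * weights_scale / output_scale) as a fixed-point multiplier and shift for the GEMMLowp output stage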
        float multiplier = iq_info.scale * wq_info.scale / oq_info.scale;
        int   output_multiplier;
        int   output_shift;
        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
        _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, oq_info.offset);
        _gemmlowp_output.allocator()->allocate();
    }
}

Status CLFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                       FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);

    bool            weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool            is_fc_after_conv = true;
    bool            is_quantized     = is_data_type_quantized_asymmetric(input->data_type());
    const GPUTarget gpu_target       = CLScheduler::get().target();

    const ITensorInfo &flatten_input     = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(input)).set_data_layout(DataLayout::NCHW));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());
    const ITensorInfo &gemmlowp_output   = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));

    // Validate accumulate biases kernel for non quantized asymmetric types
    if(biases != nullptr && !is_quantized)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixAccumulateBiasesKernel::validate(output, biases, gpu_target));
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *input_to_use   = input;
    const ITensorInfo *weights_to_use = weights;
    const ITensorInfo *tmp_output     = (is_quantized) ? &gemmlowp_output : output;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->dimension(1) > 1;
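    // Same fully-connected-after-convolution detection as in configure()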
    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
                                                                                 input->tensor_shape().cend(),
                                                                                 output->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = input->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (input->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLConvertFullyConnectedWeights::validate(weights_to_use,
                                                                             &converted_weights,
                                                                             input->tensor_shape(),
                                                                             fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));

        // Validate flatten kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLFlattenLayer::validate(input, &flatten_input));
        input_to_use = &flatten_input;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
    }

    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*input_to_use, *weights_to_use, *tmp_output));

    // Validate output stage for asymmetric quantized types
    if(is_quantized)
    {
        const UniformQuantizationInfo iq_info    = input->quantization_info().uniform();
        const UniformQuantizationInfo wq_info    = weights->quantization_info().uniform();
        const UniformQuantizationInfo oq_info    = output->quantization_info().uniform();
        const float                   multiplier = iq_info.scale * wq_info.scale / oq_info.scale;

        ARM_COMPUTE_UNUSED(multiplier);
        ARM_COMPUTE_RETURN_ERROR_ON(multiplier > 1.0f);
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&gemmlowp_output, biases, output));
    }

    return Status{};
}

void CLFullyConnectedLayer::run()
{
    prepare();

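    // Acquire the memory managed by the memory group for the duration of this run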
    MemoryGroupResourceScope scope_mg(_memory_group);

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        _flatten_layer.run();
    }

    // Run matrix multiply
    if(_is_quantized)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }

    // Accumulate biases if provided
    if(_is_quantized)
    {
        _gemmlowp_output_stage.run();
    }
    else
    {
        if(_accumulate_biases)
        {
            CLScheduler::get().enqueue(_accumulate_biases_kernel);
        }
    }
}

void CLFullyConnectedLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

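        // Helper that frees a weights tensor once it is marked as unused; the CL queue is finished first so no kernel is still reading it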
        auto release_unused = [](CLTensor * w)
        {
            if(!w->is_used())
            {
                CLScheduler::get().queue().finish();
                w->allocator()->free();
            }
        };

        // Pointer to current weights
        const ICLTensor *cur_weights = _original_weights;

        // Reshape of the weights if needed (happens only once)
        if(!_are_weights_reshaped)
        {
            // Run reshape weights kernel and mark weights as unused
            _reshape_weights_output.allocator()->allocate();
            _reshape_weights_kernel.run();

            cur_weights->mark_as_unused();
            cur_weights           = &_reshape_weights_output;
            _are_weights_reshaped = true;
        }

        // Convert weights if needed (happens only once)
        if(!_are_weights_converted)
        {
            _converted_weights_output.allocator()->allocate();
            _convert_weights.run();

            cur_weights->mark_as_unused();
            _are_weights_converted = true;
        }

        // Release reshaped weights if unused
        release_unused(&_reshape_weights_output);

        // Prepare GEMM and release its unused weights
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
        }

        // Release reshaped and converted weights if unused
        release_unused(&_reshape_weights_output);
        release_unused(&_converted_weights_output);

        _is_prepared = true;
    }
}