/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph/nodes/ConvolutionLayer.h"

#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
#include "support/ToolchainSupport.h"
#include "utils/GraphTypePrinter.h"
#include "utils/TypePrinter.h"

#include <tuple>
#include <vector>

using namespace arm_compute::graph;

namespace
{
/** Calculates the output shape of the convolution layer
 *
 * @param[in] input_shape   Input tensor shape
 * @param[in] weights_shape Weights shape
 * @param[in] conv_info     Convolution information (padding, stride, etc.)
 *
 * @return The expected output tensor shape
 */
TensorShape calculate_convolution_layer_output_shape(const TensorShape &input_shape, const TensorShape &weights_shape, const PadStrideInfo &conv_info)
{
    unsigned int output_width  = 0;
    unsigned int output_height = 0;

    // Get output width and height
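    // (arm_compute::scaled_dimensions applies the standard output-size formula using the kernel size, stride and padding carried in conv_info)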
    std::tie(output_width, output_height) = arm_compute::scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), conv_info);

    // Create output shape
    TensorShape output_shape = input_shape;
    output_shape.set(0, output_width);
    output_shape.set(1, output_height);
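    // The channel dimension of the output equals the number of kernels, stored as the fourth dimension of the weights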
    output_shape.set(2, weights_shape[3]);

    return output_shape;
}

// Instantiate GEMM based convolution layer
template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
{
    auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
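    // The backend function expects its concrete tensor type (e.g. ICLTensor for OpenCL), so the generic ITensor pointers are down-cast here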
    conv->configure(
        dynamic_cast<TensorType *>(input),
        dynamic_cast<TensorType *>(weights),
        dynamic_cast<TensorType *>(biases),
        dynamic_cast<TensorType *>(output),
        conv_info, weights_info);
    return std::move(conv);
}

// Instantiate direct convolution layer
template <typename ConvolutionType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_direct_function(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info)
{
    auto conv = arm_compute::support::cpp14::make_unique<ConvolutionType>();
    conv->configure(
        dynamic_cast<TensorType *>(input),
        dynamic_cast<TensorType *>(weights),
        dynamic_cast<TensorType *>(biases),
        dynamic_cast<TensorType *>(output),
        conv_info);
    return std::move(conv);
}

template <TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                                    ConvolutionMethodHint conv_method);

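// Each specialization below selects either the GEMM-based or the direct convolution function of its backend, depending on the requested ConvolutionMethodHint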
template <>
std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
                                                                        const WeightsInfo &weights_info,
                                                                        ConvolutionMethodHint conv_method)
{
    if(conv_method == ConvolutionMethodHint::GEMM)
    {
        return instantiate_function<arm_compute::CLConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info, weights_info);
    }
    else
    {
        return instantiate_direct_function<arm_compute::CLDirectConvolutionLayer, arm_compute::ICLTensor, TargetHint::OPENCL>(input, weights, biases, output, conv_info);
    }
}

template <>
std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *weights, ITensor *biases, ITensor *output, const PadStrideInfo &conv_info,
                                                                      const WeightsInfo &weights_info,
                                                                      ConvolutionMethodHint conv_method)
{
    if(conv_method == ConvolutionMethodHint::GEMM)
    {
        return instantiate_function<arm_compute::NEConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info, weights_info);
    }
    else
    {
        return instantiate_direct_function<arm_compute::NEDirectConvolutionLayer, arm_compute::ITensor, TargetHint::NEON>(input, weights, biases, output, conv_info);
    }
}
} // namespace

/** Grouped Convolution function: runs one convolution function per group */
class GroupedConvolutionFunction final : public arm_compute::IFunction
{
public:
    /** Default Constructor */
    GroupedConvolutionFunction()
        : _convolutions()
    {
    }
    /** Default Destructor */
    ~GroupedConvolutionFunction() final = default;
    /** Prevent instances from being copy constructed */
    GroupedConvolutionFunction(const GroupedConvolutionFunction &) = delete;
    /** Prevent instances from being copy assigned */
    GroupedConvolutionFunction &operator=(const GroupedConvolutionFunction &) = delete;
    /** Allow instances to be move constructed */
    GroupedConvolutionFunction(GroupedConvolutionFunction &&) noexcept = default;
    /** Allow instances to be move assigned */
    GroupedConvolutionFunction &operator=(GroupedConvolutionFunction &&) noexcept = default;
    /** Adds a convolution
     *
     * @param[in] convolution Convolution function to add
     */
    void add_convolution_function(std::unique_ptr<IFunction> convolution)
    {
        _convolutions.emplace_back(std::move(convolution));
    }

    // Inherited methods overridden:
    void run() override
    {
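        // Run each group's convolution in sequence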
        for(auto &c : _convolutions)
        {
            c->run();
        }
    }

private:
    std::vector<std::unique_ptr<IFunction>> _convolutions;
};

std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
    // Set weights and biases info
    if(_weights.tensor() == nullptr)
    {
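        // Weights shape is [kernel_width, kernel_height, input_channels / num_groups, num_output_feature_maps]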
        _weights.set_info(TensorInfo(TensorShape(_conv_width, _conv_height, input->info()->dimension(2) / _num_groups, _ofm),
                                     input->info()->num_channels(), input->info()->data_type(),
                                     input->info()->fixed_point_position()));
    }
    if(_biases.tensor() == nullptr)
    {
        _biases.set_info(TensorInfo(TensorShape(_ofm), input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position()));
    }

    std::unique_ptr<arm_compute::IFunction> func;
    _target_hint                                 = ctx.hints().target_hint();
    _input                                       = input;
    _output                                      = output;
    const ConvolutionMethodHint conv_method_hint = ctx.hints().convolution_method_hint();

    // Check if the weights and biases are loaded
    bool weights_are_loaded = _weights.tensor() != nullptr;
    bool biases_are_loaded  = _biases.tensor() != nullptr;

    // Set bias and weights target
    _weights.set_target(_target_hint);
    _biases.set_target(_target_hint);

    // Calculate output shape
    TensorShape output_shape = calculate_convolution_layer_output_shape(_input->info()->tensor_shape(), _weights.info().tensor_shape(), _conv_info);

    // Output auto initialization if not yet initialized
    arm_compute::auto_init_if_empty(*_output->info(), output_shape, 1, _input->info()->data_type(), _input->info()->fixed_point_position());

    // Create appropriate convolution function
    if(_num_groups == 1)
    {
        func = instantiate_convolution(conv_method_hint);
    }
    else
    {
        func = instantiate_grouped_convolution(conv_method_hint);
    }

    // Fill weights
    if(!weights_are_loaded)
    {
        _weights.allocate_and_fill_if_needed();
    }
    // Fill biases
    if(!biases_are_loaded)
    {
        _biases.allocate_and_fill_if_needed();
    }

    return func;
}

void ConvolutionLayer::print_info()
{
    if(_target_hint == TargetHint::OPENCL)
    {
        std::cout << "Instantiating CLConvolutionLayer";
    }
    else
    {
        std::cout << "Instantiating NEConvolutionLayer";
    }
    std::cout << " Data Type: " << _input->info()->data_type()
              << " Input Shape: " << _input->info()->tensor_shape()
              << " Weights Shape: " << _weights.info().tensor_shape()
              << " Biases Shape: " << _biases.info().tensor_shape()
              << " Output Shape: " << _output->info()->tensor_shape()
              << " PadStrideInfo: " << _conv_info
              << " Groups: " << _num_groups
              << " WeightsInfo: " << _weights_info
              << std::endl;
}

std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_convolution(ConvolutionMethodHint conv_method_hint)
{
    std::unique_ptr<arm_compute::IFunction> func;
    if(_target_hint == TargetHint::OPENCL)
    {
        func = instantiate<TargetHint::OPENCL>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
    }
    else
    {
        func = instantiate<TargetHint::NEON>(_input, _weights.tensor(), _biases.tensor(), _output, _conv_info, _weights_info, conv_method_hint);
    }
    return func;
}

std::unique_ptr<arm_compute::IFunction> ConvolutionLayer::instantiate_grouped_convolution(ConvolutionMethodHint conv_method_hint)
{
    // Get tensor shapes
    TensorShape input_shape   = _input->info()->tensor_shape();
    TensorShape output_shape  = _output->info()->tensor_shape();
    TensorShape weights_shape = _weights.info().tensor_shape();
    TensorShape biases_shape  = _biases.info().tensor_shape();

    ARM_COMPUTE_ERROR_ON_MSG((input_shape.z() % _num_groups) != 0, "Input depth not multiple of the number of groups!");
    ARM_COMPUTE_ERROR_ON_MSG((output_shape.z() % _num_groups) != 0, "Output depth not multiple of the number of groups!");
    ARM_COMPUTE_ERROR_ON_MSG((weights_shape[3] % _num_groups) != 0, "Number of kernels not multiple of the number of groups!");
    ARM_COMPUTE_ERROR_ON_MSG((biases_shape.x() % _num_groups) != 0, "Biases not multiple of the number of groups!");

    // Create a grouped convolution function
    auto grouped_conv = arm_compute::support::cpp14::make_unique<GroupedConvolutionFunction>();

    // Create sub-tensors vectors
    _is = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
    _os = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
    _ws = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);
    _bs = arm_compute::support::cpp14::make_unique<SubTensor[]>(_num_groups);

    // Calculate sub-tensor splits
    const int input_split   = input_shape.z() / _num_groups;
    const int output_split  = output_shape.z() / _num_groups;
    const int weights_split = weights_shape[3] / _num_groups;
    const int biases_split  = biases_shape.x() / _num_groups;

    // Calculate sub-tensor shapes
    input_shape.set(2, input_split);
    output_shape.set(2, output_split);
    weights_shape.set(3, weights_split);
    biases_shape.set(0, biases_split);

    // Configure sub-tensors
    for(int i = 0; i < static_cast<int>(_num_groups); ++i)
    {
        // Create convolution function
        std::unique_ptr<arm_compute::IFunction> func;

        // Calculate sub-tensors starting coordinates
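        // Input and output are split along their channel (z) dimension, weights along the kernel (4th) dimension and biases along their single dimension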
        Coordinates input_coord(0, 0, input_split * i);
        Coordinates output_coord(0, 0, output_split * i);
        Coordinates weights_coord(0, 0, 0, weights_split * i);
        Coordinates biases_coord(biases_split * i);

        // Create sub-tensors for input, output, weights and bias
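        // Any target hint other than OpenCL falls back to NEON for the sub-tensors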
        auto hint_to_use = (_target_hint == TargetHint::OPENCL) ? TargetHint::OPENCL : TargetHint::NEON;
        _is[i]           = SubTensor(_input, input_shape, input_coord, hint_to_use);
        _os[i]           = SubTensor(_output, output_shape, output_coord, hint_to_use);
        _ws[i]           = SubTensor(_weights.tensor(), weights_shape, weights_coord, hint_to_use);
        _bs[i]           = SubTensor(_biases.tensor(), biases_shape, biases_coord, hint_to_use);

        // Instantiate convolution function
        if(_target_hint == TargetHint::OPENCL)
        {
            func = instantiate<TargetHint::OPENCL>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
        }
        else
        {
            func = instantiate<TargetHint::NEON>(_is[i].tensor(), _ws[i].tensor(), _bs[i].tensor(), _os[i].tensor(), _conv_info, _weights_info, conv_method_hint);
        }

        // Add convolution function to the list of convolutions for the grouped convolution
        grouped_conv->add_convolution_function(std::move(func));
    }

    return std::move(grouped_conv);
}