/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NELocallyConnectedLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"

#include <cmath>
#include <tuple>

using namespace arm_compute;

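// Minimal usage sketch (tensor shapes follow the checks in configure() below; the concrete
// stride/pad values and variable names are illustrative assumptions only):
//
//   Tensor src;     // [ src_w, src_h, IFM ]
//   Tensor weights; // [ kernel_w, kernel_h, IFM, OFM, conv_w * conv_h ]
//   Tensor biases;  // [ OFM, conv_w * conv_h ]
//   Tensor dst;     // [ conv_w, conv_h, OFM ]
//
//   NELocallyConnectedLayer lc_layer;
//   lc_layer.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0));
//   // ... allocate and fill src, weights and biases ...
//   lc_layer.run();
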
NELocallyConnectedLayer::NELocallyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _input_im2col_kernel(), _weights_reshape_kernel(), _mm_kernel(), _output_col2im_kernel(), _input_im2col_reshaped(), _weights_reshaped(), _gemm_output(),
      _is_first_run(false)
{
}

void NELocallyConnectedLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::F32);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F32);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_ERROR_ON(weights->info()->dimension(2) != input->info()->dimension(2));

    if(biases != nullptr)
    {
        ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::F32);
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        ARM_COMPUTE_ERROR_ON(biases->info()->dimension(0) != weights->info()->dimension(3));
        ARM_COMPUTE_ERROR_ON(biases->info()->num_dimensions() > 2);
    }

    bool _has_bias = (biases != nullptr);
    _is_first_run  = true;

    // Get parameters for conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    unsigned int pad_x    = 0;
    unsigned int pad_y    = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();
    std::tie(pad_x, pad_y)       = conv_info.pad();

    const unsigned int kernel_width  = weights->info()->dimension(0);
    const unsigned int kernel_height = weights->info()->dimension(1);

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(0), input->info()->dimension(1), kernel_width, kernel_height,
                                                 conv_info);

    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");
    ARM_COMPUTE_ERROR_ON_MSG(weights->info()->dimension(4) != (conv_w * conv_h), "Weights shape does not match the expected one");

    // Create tensor to store the reshaped weights
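    // Each output spatial position has its own weight matrix of mat_weights_rows x mat_weights_cols
    // values (kernel_w * kernel_h * IFM, plus one extra row when a bias is appended, by OFM); there is
    // one such matrix per position, i.e. conv_w * conv_h of them stacked along the third dimension.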
    const size_t mat_weights_cols = weights->info()->dimension(3);
    const size_t mat_weights_rows = weights->info()->dimension(0) * weights->info()->dimension(1) * weights->info()->dimension(2) + ((_has_bias) ? 1 : 0);
    const size_t mat_weights_num  = weights->info()->dimension(4);

    const TensorShape shape_wr(mat_weights_cols, mat_weights_rows, mat_weights_num);

    _weights_reshaped.allocator()->init(TensorInfo(shape_wr, 1, weights->info()->data_type()));

    // Create tensor to store im2col reshaped inputs
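    // im2col linearises every convolution window of the input into one row: mat_input_cols values per
    // window (matching the rows of a weight matrix, including the trailing 1 used for the bias), with
    // one row per output spatial position.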
    const size_t mat_input_cols = mat_weights_rows;
    const size_t mat_input_rows = conv_w * conv_h;
    TensorShape  shape_im2col   = input->info()->tensor_shape();
    shape_im2col.set(0, mat_input_cols);
    shape_im2col.set(1, mat_input_rows);
    shape_im2col.set(2, 1);

    _input_im2col_reshaped.allocator()->init(TensorInfo(shape_im2col, 1, input->info()->data_type()));

    // Create locally connected layer output tensor
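    // The matrix multiplication writes one row per output spatial position and one column per output
    // feature map; col2im later rearranges this buffer into the (conv_w, conv_h, OFM) output layout.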
    TensorShape shape_gemm = _input_im2col_reshaped.info()->tensor_shape();
    shape_gemm.set(0, mat_weights_cols);
    shape_gemm.set(1, mat_input_rows);
    _gemm_output.allocator()->init(TensorInfo(shape_gemm, 1, input->info()->data_type()));

    // Manage intermediate buffers
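    // Registering the intermediates with the memory group lets the memory manager share their backing
    // memory with other managed functions; the buffers are only valid between the acquire()/release()
    // calls issued in run().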
    _memory_group.manage(&_input_im2col_reshaped);
    _memory_group.manage(&_gemm_output);

    // Configure kernels
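    // Pipeline: im2col on the input, reshape of the per-position weights, per-position matrix
    // multiplication, then col2im back to the output tensor layout.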
    _input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, _has_bias);
    _weights_reshape_kernel.configure(weights, biases, &_weights_reshaped);
    _mm_kernel.configure(&_input_im2col_reshaped, &_weights_reshaped, &_gemm_output);
    _output_col2im_kernel.configure(&_gemm_output, output, std::make_pair(conv_w, conv_h));

    // Allocate intermediate tensors
    _weights_reshaped.allocator()->allocate();
    _input_im2col_reshaped.allocator()->allocate();
    _gemm_output.allocator()->allocate();
}

void NELocallyConnectedLayer::run()
{
    // Run weights reshaping (Runs once for every configure)
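    // The reshaped weights depend only on the weights/biases passed to configure(), not on the input
    // data, so they are computed on the first call to run() and then reused on subsequent calls.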
    if(_is_first_run)
    {
        _is_first_run = false;
        NEScheduler::get().schedule(&_weights_reshape_kernel, 3);
    }

    _memory_group.acquire();

    // Run input reshaping
    NEScheduler::get().schedule(&_input_im2col_kernel, Window::DimY);

    // Run GEMM on reshaped matrices
    NEScheduler::get().schedule(&_mm_kernel, Window::DimX);

    // Reshape output matrix
    NEScheduler::get().schedule(&_output_col2im_kernel, Window::DimY);

    _memory_group.release();
}