/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "convolution.hpp"
#include "winograd_layer.hpp"
#include "tensor.hpp"


/** Determine how much memory (in units of TIn) to allocate for the transformed
 * weights.
 */
template <
  int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols,
  typename TIn, typename TOut
>
unsigned int WinogradConvolutionLayer<
  OutputTileRows, OutputTileCols, KernelRows, KernelCols, TIn, TOut
>::get_weight_storage_size(
  const int n_output_channels,  /** Number of output feature maps. */
  const int n_input_channels    /** Number of input feature maps. */
)
{
  const KernelShape shape(
    n_output_channels, KernelRows, KernelCols, n_input_channels
  );
  return static_cast<unsigned int>(
    // WinogradConv returns the size in bytes; divide by `sizeof(TIn)` to
    // express it in units of TIn.
    WinogradConv::get_kernel_storage_size(shape) / sizeof(TIn)
  );
}

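/* Usage sketch (illustrative only, not part of this translation unit): a caller
 * would typically query the transformed-weight size and allocate the buffer
 * before constructing the layer. This assumes the size helpers are static
 * members (as their use without an object suggests); the alias
 * WinogradLayer_f2x2_3x3 and the variable names are hypothetical.
 *
 *   using WinogradLayer_f2x2_3x3 = WinogradConvolutionLayer<2, 2, 3, 3, float, float>;
 *   const unsigned int n_weight_elems =
 *       WinogradLayer_f2x2_3x3::get_weight_storage_size(n_output_channels, n_input_channels);
 *   std::vector<float> winograd_weights(n_weight_elems);  // size is in units of TIn, not bytes
 */
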
/** Determine how much memory (in units of TIn) to allocate for the transformed
 * input.
 */
template <
  int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols,
  typename TIn, typename TOut
>
unsigned int WinogradConvolutionLayer<
  OutputTileRows, OutputTileCols, KernelRows, KernelCols, TIn, TOut
>::get_input_storage_size(
  const int n_batches,     /** Number of batches in the input tensor. */
  const int n_channels,    /** Number of feature maps in the input tensor. */
  const int n_rows,        /** Number of rows in each feature map. */
  const int n_cols,        /** Number of columns in each feature map. */
  const bool same_padding  /** Use "SAME" padding, otherwise use "VALID". */
)
{
  // Construct shapes for the input and kernel tensors.
  const Tensor4DShape input_shape(n_batches, n_rows, n_cols, n_channels);
  const KernelShape kern_shape(1, KernelRows, KernelCols, n_channels);
  const PaddingType padding = (same_padding) ? PADDING_SAME : PADDING_VALID;

  // Return the size, converted into units of TIn
  return static_cast<unsigned int>(
    WinogradConv::get_input_storage_size(kern_shape, input_shape, padding) /
    sizeof(TIn)
  );
}


/** Determine how much memory (in units of TOut) to allocate for the (Winograd
 * domain) output.
 */
template <
  int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols,
  typename TIn, typename TOut
>
unsigned int WinogradConvolutionLayer<
  OutputTileRows, OutputTileCols, KernelRows, KernelCols, TIn, TOut
>::get_output_storage_size(
  const int n_batches,          /** Number of batches in the output tensor. */
  const int n_rows,             /** Number of rows in each feature map of the input tensor. */
  const int n_cols,             /** Number of columns in each feature map of the input tensor. */
  const int n_output_channels,  /** Number of feature maps in the output tensor. */
  const bool same_padding       /** Use "SAME" padding, otherwise use "VALID". */
)
{
  // Construct shapes for the input and kernel tensors.
  const Tensor4DShape input_shape(n_batches, n_rows, n_cols, 1);
  const KernelShape kern_shape(n_output_channels, KernelRows, KernelCols, 1);
  const PaddingType padding = (same_padding) ? PADDING_SAME : PADDING_VALID;

  // Return the size, converted into units of TOut
  return static_cast<unsigned int>(
    WinogradConv::get_output_storage_size(kern_shape, input_shape, padding) /
    sizeof(TOut)
  );
}

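/* Working-space sketch (illustrative): the Winograd-domain input and output
 * buffers can be sized with the two helpers above. Note that both take the
 * input spatial dimensions; the output extent is derived internally from the
 * kernel size and padding. The alias and variable names are hypothetical, as
 * in the sketch above.
 *
 *   const unsigned int n_input_elems = WinogradLayer_f2x2_3x3::get_input_storage_size(
 *       n_batches, n_input_channels, n_input_rows, n_input_cols, same_padding);
 *   const unsigned int n_output_elems = WinogradLayer_f2x2_3x3::get_output_storage_size(
 *       n_batches, n_input_rows, n_input_cols, n_output_channels, same_padding);
 *   std::vector<float> winograd_input(n_input_elems);    // units of TIn
 *   std::vector<float> winograd_output(n_output_elems);  // units of TOut
 */
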
/** Get the shape (rows, cols) of a feature map of the output tensor. */
template <
  int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols,
  typename TIn, typename TOut
>
std::pair<int, int> WinogradConvolutionLayer<
  OutputTileRows, OutputTileCols, KernelRows, KernelCols, TIn, TOut
>::get_output_feature_map_shape(
  const int n_input_rows,  /** Number of rows in the input feature map. */
  const int n_input_cols,  /** Number of columns in the input feature map. */
  const bool same_padding  /** Use "SAME" padding, otherwise use "VALID". */
)
{
  // Construct shapes for the input and kernel tensors.
  const Tensor4DShape input_shape(1, n_input_rows, n_input_cols, 1);
  const KernelShape kern_shape(1, KernelRows, KernelCols, 1);
  const PaddingType padding = (same_padding) ? PADDING_SAME : PADDING_VALID;

  // Compute the new shape
  const auto output_shape = WinogradConv::get_output_shape(
    kern_shape, input_shape, padding
  );

  return std::make_pair(output_shape.n_rows, output_shape.n_cols);
}

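/* Shape query sketch (illustrative): the spatial-domain output extents follow
 * from the input extents and the padding mode, which is what a caller needs to
 * allocate the final NHWC output tensor. Names are hypothetical, as above.
 *
 *   const auto out_shape = WinogradLayer_f2x2_3x3::get_output_feature_map_shape(
 *       n_input_rows, n_input_cols, same_padding);
 *   const int n_output_rows = out_shape.first;
 *   const int n_output_cols = out_shape.second;
 *   std::vector<float> output(n_batches * n_output_rows * n_output_cols * n_output_channels);
 */
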
/** Create a new Winograd convolution layer.
 */
template <
  int OutputTileRows, int OutputTileCols, int KernelRows, int KernelCols,
  typename TIn, typename TOut
>
WinogradConvolutionLayer<OutputTileRows, OutputTileCols, KernelRows, KernelCols, TIn, TOut>::
WinogradConvolutionLayer(
  const int n_batches,          /** Number of batches in the input and output tensors. */
  const int n_input_channels,   /** Number of feature maps in a batch of the input tensor. */
  const int n_input_rows,       /** Number of rows in a feature map of the input tensor. */
  const int n_input_cols,       /** Number of columns in a feature map of the input tensor. */
  const int n_output_channels,  /** Number of feature maps in the output tensor. */
  const bool same_padding,      /** Use "SAME" padding, otherwise use "VALID". */
  const TIn* const weights,     /** Pointer to weight tensor in spatial domain. Must be ordered as "Height x Width x Input Feature Maps x Output Feature Maps". */
  TIn* const winograd_weights,  /** Pointer to storage for weight tensor in the Winograd domain. Must be at least the size returned by `get_weight_storage_size`. */
  const TIn* const input,       /** Pointer to NHWC ordered input tensor, in the spatial domain. */
  TIn* const winograd_input,    /** Pointer to working space for the input tensor in the Winograd domain. Must be at least the size returned by `get_input_storage_size`. */
  TOut* const output,           /** Pointer to NHWC ordered output tensor, in the spatial domain. */
  TOut* const winograd_output   /** Pointer to working space for the output tensor in the Winograd domain. Must be at least the size returned by `get_output_storage_size`. */
) : _kernel_shape(n_output_channels, KernelRows, KernelCols, n_input_channels),
    _input_shape(n_batches, n_input_rows, n_input_cols, n_input_channels),
    _padding(same_padding ? PADDING_SAME : PADDING_VALID),
    _output_shape(WinogradConv::get_output_shape(_kernel_shape, _input_shape, _padding)),
    _n_output_rows(_output_shape.n_rows),
    _n_output_cols(_output_shape.n_cols),
    _kernel_matrix_stride(WinogradConv::get_kernel_matrix_stride(_kernel_shape)),
    _kernel_matrix_row_stride(roundup(n_output_channels, WinogradConv::N_BLOCK)),
    _input_matrix_stride(WinogradConv::get_input_matrix_stride(_kernel_shape, _input_shape, _padding)),
    _input_matrix_row_stride(n_input_channels),
    _output_matrix_stride(WinogradConv::get_output_matrix_stride(_kernel_shape, _input_shape, _padding)),
    _output_matrix_row_stride(_kernel_matrix_row_stride),
    _tile_rows(iceildiv(_n_output_rows, OutputTileRows)),
    _tile_cols(iceildiv(_n_output_cols, OutputTileCols)),
    _m(n_batches * _tile_rows * _tile_cols),
    _k(n_input_channels),
    _n(n_output_channels),
    weights_transform(
      weights, winograd_weights,
      _kernel_matrix_stride, _kernel_matrix_row_stride,
      n_output_channels, n_input_channels
    ),
    input_transform(
      input, n_batches, n_input_rows, n_input_cols, n_input_channels, _padding,
      winograd_input, _input_matrix_stride, _input_matrix_row_stride
    ),
    gemms(
      WinogradBase::N_GEMMS, _m, _k, _n,
      _input_matrix_stride, _input_matrix_row_stride,
      _kernel_matrix_stride, _kernel_matrix_row_stride,
      _output_matrix_stride, _output_matrix_row_stride,
      winograd_input, winograd_weights, winograd_output
    ),
    output_transform(
      winograd_output, _output_matrix_stride, _output_matrix_row_stride,
      output, n_batches, _n_output_rows, _n_output_cols, n_output_channels
    )
{
}

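/* Construction sketch (illustrative): with the spatial-domain weights, input
 * and output tensors plus the three Winograd-domain buffers sized as in the
 * sketches above, the layer is assembled in one call. How the transforms and
 * GEMMs are subsequently scheduled is defined elsewhere and is not shown here.
 * The alias and variable names are hypothetical.
 *
 *   WinogradLayer_f2x2_3x3 layer(
 *       n_batches, n_input_channels, n_input_rows, n_input_cols,
 *       n_output_channels, same_padding,
 *       weights,                  // spatial-domain weights, HWIO ordered
 *       winograd_weights.data(),  // transformed-weight storage
 *       input,                    // NHWC spatial-domain input
 *       winograd_input.data(),    // Winograd-domain input working space
 *       output.data(),            // NHWC spatial-domain output
 *       winograd_output.data());  // Winograd-domain output working space
 */
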
// Instantiate valid implementations: the Winograd F(2x2, 3x3) and F(4x4, 3x3)
// variants for single-precision floating point.
template class WinogradConvolutionLayer<2, 2, 3, 3, float, float>;
template class WinogradConvolutionLayer<4, 4, 3, 3, float, float>;