blob: c01588a164f5e196fd780319895af76db2a351fb [file] [log] [blame]
giuros014a8ec802019-03-18 13:25:05 +00001/*
2 * Copyright (c) 2019 ARM Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h"
25
26#include "arm_compute/core/Helpers.h"
27#include "arm_compute/core/Utils.h"
28#include "arm_compute/core/Validate.h"
29#include "arm_compute/core/utils/misc/ShapeCalculator.h"
30#include "arm_compute/runtime/CL/CLScheduler.h"
31#include "arm_compute/runtime/CPP/CPPScheduler.h"
32#include "utils/TypePrinter.h"
33
34#include <memory>
35#include <tuple>
36
37namespace arm_compute
38{
39using namespace arm_compute::misc::shape_calculator;
40
// Constructor: takes (shared) ownership of the memory manager that schedules the
// lifetime of the intermediate upsampled tensor (_scaled_output).
CLDirectDeconvolutionLayer::CLDirectDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
    : _memory_group(std::move(memory_manager)), // group through which _scaled_output is managed in configure()
      _scale_f(),                               // upsample (scale) stage
      _conv_f(),                                // stride-1 convolution stage run on the upsampled tensor
      _flip_weights(),                          // kernel that flips the weights (run once in prepare())
      _scaled_output(),                         // intermediate upsampled input
      _original_weights(nullptr),               // borrowed pointer, set in configure(); not owned
      _weights_flipped(),                       // flipped copy of the weights, allocated lazily in prepare()
      _is_prepared(false)                       // set to true after prepare() has flipped the weights
{
}
52
53Status CLDirectDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &info,
54 const WeightsInfo &weights_info)
55{
56 ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
57 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
58 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
59
60 const DataLayout data_layout = input->data_layout();
61
62 const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
63 const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
64 const size_t idx_c = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
65
66 ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) != weights->dimension(idx_h));
67 ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) < 1);
68 ARM_COMPUTE_RETURN_ERROR_ON(!info.padding_is_symmetric());
69
70 const unsigned int stride_x = info.stride().first;
71 const unsigned int stride_y = info.stride().second;
72
73 auto out_dims = deconvolution_output_dimensions(input->dimension(idx_w), input->dimension(idx_h), weights->dimension(idx_w), weights->dimension(idx_h),
74 info.pad().first, info.pad().second, stride_x, stride_y);
75
76 const TensorShape output_shape = compute_deconvolution_output_shape(out_dims, *input, *weights);
77
78 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, weights);
79
80 if(bias != nullptr)
81 {
82 if(is_data_type_quantized_asymmetric(input->data_type()))
83 {
84 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
85 }
86 else
87 {
88 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
89 }
90 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, bias);
91 }
92
93 ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(idx_w) != output_shape[idx_w], "Output's width is invalid.");
94 ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(idx_h) != output_shape[idx_h], "Output's height is invalid.");
95 ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(idx_c) != output_shape[idx_c], "Output's depth is invalid.");
96
97 unsigned int padx = 0;
98 unsigned int pady = 0;
99 const TensorShape scale_out_shape = compute_deconvolution_upsampled_shape(*input, *weights, stride_x, stride_y, 0, 0, out_dims, padx, pady);
100 TensorInfo scale_out_info(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(scale_out_shape).set_data_layout(data_layout));
101 const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL);
102
103 ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionLayerUpsample::validate(input, &scale_out_info, BorderSize(), info));
104 ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayer::validate(&scale_out_info, weights, bias, output, conv_info, weights_info));
105
106 return Status{};
107}
108
109void CLDirectDeconvolutionLayer::configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &info,
110 const WeightsInfo &weights_info)
111{
112 ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
113
114 const unsigned int stride_x = info.stride().first;
115 const unsigned int stride_y = info.stride().second;
116
117 const DataLayout data_layout = input->info()->data_layout();
118
119 const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
120 const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
121
122 _original_weights = weights;
123 _weights_flipped.allocator()->init(weights->info()->clone()->set_data_layout(data_layout));
124 _flip_weights.configure(weights, &_weights_flipped);
125
126 auto out_dims = deconvolution_output_dimensions(input->info()->dimension(idx_w), input->info()->dimension(idx_h), weights->info()->dimension(idx_w), weights->info()->dimension(idx_h),
127 info.pad().first, info.pad().second, stride_x, stride_y);
128
129 const TensorShape output_shape = compute_deconvolution_output_shape(out_dims, *input->info(), *weights->info());
130
131 // Output auto initialization if not yet initialized
132 auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape).set_data_layout(data_layout));
133
134 // Perform validation step
135 ARM_COMPUTE_ERROR_THROW_ON(CLDirectDeconvolutionLayer::validate(input->info(), weights->info(), bias == nullptr ? nullptr : bias->info(), output->info(), info));
136
137 _is_prepared = weights_info.retain_internal_weights();
138
139 _memory_group.manage(&_scaled_output);
140
141 // Find the upsampled dimensions and the padding needed for the convolution with stride 1 in order to match output shape
142 unsigned int padx = 0;
143 unsigned int pady = 0;
144 const TensorShape scale_out_shape = compute_deconvolution_upsampled_shape(*input->info(), *weights->info(), stride_x, stride_y, 0, 0, out_dims, padx, pady);
145
146 TensorInfo scale_out_info(scale_out_shape, 1, input->info()->data_type(), input->info()->quantization_info());
147 scale_out_info.set_data_layout(data_layout);
148 _scaled_output.allocator()->init(scale_out_info);
149
150 // configure scale function
151 const PadStrideInfo upsample_info(stride_x, stride_y, padx / 2, pady / 2);
152 _scale_f.configure(input, &_scaled_output, BorderSize(), upsample_info);
153
154 // setup the function to convolve the upscaled output
155 const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL);
156 _conv_f.configure(&_scaled_output, &_weights_flipped, bias, output, conv_info, weights_info);
157 _scaled_output.allocator()->allocate();
158}
159
// Executes the deconvolution: upsample the input, then run the stride-1 convolution.
// prepare() is invoked first so the flipped weights are ready on the first run.
void CLDirectDeconvolutionLayer::run()
{
    prepare();

    // Acquire the managed intermediate tensor (_scaled_output) for the duration of the run.
    // NOTE(review): release() is skipped if either run() below throws — consider an RAII
    // scope if this version of the library provides one; confirm against the project's
    // error-handling policy.
    _memory_group.acquire();

    _scale_f.run();
    _conv_f.run();

    _memory_group.release();
}
171
// One-off preparation: materializes the flipped weights and lets the convolution
// stage run its own preparation. Safe to call multiple times; only the first call
// (while !_is_prepared) does any work.
void CLDirectDeconvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        // Run weights flipping and mark original weights tensor as unused.
        // The flip kernel is executed on the CPU (CPPScheduler), so both CL tensors
        // are mapped (blocking) into host memory around the schedule call.
        _weights_flipped.allocator()->allocate();
        _weights_flipped.map(true);
        _original_weights->map(CLScheduler::get().queue(), true);
        CPPScheduler::get().schedule(&_flip_weights, Window::DimZ);
        _weights_flipped.unmap();
        _original_weights->unmap(CLScheduler::get().queue());
        _original_weights->mark_as_unused();

        // Prepare convolution (may e.g. transform its weights internally)
        _conv_f.prepare();

        // If the convolution no longer needs the flipped weights, free them eagerly
        if(!_weights_flipped.is_used())
        {
            _weights_flipped.allocator()->free();
        }

        _is_prepared = true;
    }
}
198} // namespace arm_compute