blob: b2a77ba7e342684ef1da9f51d37606fe0dc6cd7d [file] [log] [blame]
John Kesapides8d942692019-02-26 14:52:12 +00001/*
Michele Di Giorgiod9eaf612020-07-08 11:12:57 +01002 * Copyright (c) 2019-2020 Arm Limited.
John Kesapides8d942692019-02-26 14:52:12 +00003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/graph.h"
25
John Kesapides8d942692019-02-26 14:52:12 +000026#include "tests/NEON/Accessor.h"
27#include "tests/validation/Validation.h"
28#include "tests/validation/reference/DepthwiseConvolutionLayer.h"
29#include "tests/validation/reference/Permute.h"
30
31#include "utils/CommonGraphOptions.h"
32#include "utils/GraphUtils.h"
33#include "utils/Utils.h"
34
35#include "ValidateExample.h"
36#include "graph_validate_utils.h"
37
38#include <utility>
39
40using namespace arm_compute::utils;
41using namespace arm_compute::graph::frontend;
42using namespace arm_compute::graph_utils;
43using namespace arm_compute::graph;
44using namespace arm_compute;
45using namespace arm_compute::test;
46using namespace arm_compute::test::validation;
47
48namespace
49{
50/** Depthwise Convolution command line options used to configure the graph examples
51 *
52 * (Similar to common options)
53 * The options in this object get populated when "parse()" is called on the parser used to construct it.
54 * The expected workflow is:
55 *
56 * CommandLineParser parser;
57 * CommonOptions options( parser );
58 * parser.parse(argc, argv);
59 */
60class DepthConvolutionOptions final : public CommonGraphValidateOptions
61{
62public:
63 explicit DepthConvolutionOptions(CommandLineParser &parser) noexcept
64 : CommonGraphValidateOptions(parser),
65 width(parser.add_option<SimpleOption<int>>("width", 9)),
66 height(parser.add_option<SimpleOption<int>>("height", 9)),
67 channels(parser.add_option<SimpleOption<int>>("channels", 1)),
68 batch(parser.add_option<SimpleOption<int>>("batch", 1)),
69 weights_width(parser.add_option<SimpleOption<int>>("weights_width", 3)),
70 weights_height(parser.add_option<SimpleOption<int>>("weights_height", 3)),
71 padding_top(parser.add_option<SimpleOption<int>>("padding_top", 0)),
72 padding_left(parser.add_option<SimpleOption<int>>("padding_left", 0)),
73 padding_bottom(parser.add_option<SimpleOption<int>>("padding_bottom", 0)),
74 padding_right(parser.add_option<SimpleOption<int>>("padding_right", 0)),
75 stride_x(parser.add_option<SimpleOption<int>>("stride_x", 1)),
76 stride_y(parser.add_option<SimpleOption<int>>("stride_y", 1)),
77 padding_mode(),
78 conv_mode(),
79 depth_multiplier(parser.add_option<SimpleOption<int>>("depth_multiplier", 1)),
80 data_layout(),
81 scale(parser.add_option<SimpleOption<float>>("scale", 1.0f)),
82 offset(parser.add_option<SimpleOption<int>>("offset", 0)),
83 weights_scale(parser.add_option<SimpleOption<float>>("weights_scale", 1.0f)),
84 weights_offset(parser.add_option<SimpleOption<int>>("weights_offset", 0)),
85 output_scale(parser.add_option<SimpleOption<float>>("output_scale", 1.0f)),
86 output_offset(parser.add_option<SimpleOption<int>>("output_offset", 0)),
87 input_range_low(parser.add_option<SimpleOption<uint64_t>>("input_range_low")),
88 input_range_high(parser.add_option<SimpleOption<uint64_t>>("input_range_high")),
89 weights_range_low(parser.add_option<SimpleOption<uint64_t>>("weights_range_low")),
90 weights_range_high(parser.add_option<SimpleOption<uint64_t>>("weights_range_high")),
91 input_npy(parser.add_option<SimpleOption<std::string>>("input_image")),
92 output_npy(parser.add_option<SimpleOption<std::string>>("reference_image")),
93 weights_npy(parser.add_option<SimpleOption<std::string>>("weights_npy")),
94 bias_npy(parser.add_option<SimpleOption<std::string>>("bias_image"))
95 {
96 const std::set<ConvolutionPaddingMode> available_padding_modes
97 {
98 ConvolutionPaddingMode::Valid,
99 ConvolutionPaddingMode::Same
100 };
101
102 const std::set<arm_compute::graph::DepthwiseConvolutionMethod> supported_convolution_methods
103 {
104 arm_compute::graph::DepthwiseConvolutionMethod::Default,
105 arm_compute::graph::DepthwiseConvolutionMethod::GEMV,
106 arm_compute::graph::DepthwiseConvolutionMethod::Optimized3x3,
107 };
108
109 const std::set<DataLayout> supported_data_layouts
110 {
111 DataLayout::NHWC,
112 DataLayout::NCHW,
113 };
114
115 padding_mode = parser.add_option<EnumOption<ConvolutionPaddingMode>>("padding_mode", available_padding_modes, ConvolutionPaddingMode::Valid);
116 conv_mode = parser.add_option<EnumOption<arm_compute::graph::DepthwiseConvolutionMethod>>("convolution_method", supported_convolution_methods,
117 arm_compute::graph::DepthwiseConvolutionMethod::Default);
118 data_layout = parser.add_option<EnumOption<DataLayout>>("layout", supported_data_layouts, DataLayout::NHWC);
119
120 padding_mode->set_help("Set padding mode");
121 width->set_help("Set Input dimension width");
122 height->set_help("Set Input dimension height");
123 channels->set_help("Set Input dimension channels");
124 batch->set_help("Set Input dimension batch");
125 weights_width->set_help("Set weights_dimensions width");
126 weights_height->set_help("Set weights_dimensions height");
127 padding_top->set_help("Set padding top");
128 padding_bottom->set_help("Set padding bottom");
129 padding_left->set_help("Set padding left");
130 padding_right->set_help("Set padding right");
131 stride_x->set_help("Set padding stride x");
132 stride_y->set_help("Set padding stride y");
133 conv_mode->set_help("Set convolution method");
134 data_layout->set_help("Data layout to use");
135 scale->set_help("Quantization scale from QASYMM8");
136 offset->set_help("Quantization offset from QASYMM8");
137 output_scale->set_help("Quantization scale from QASYMM8");
138 output_offset->set_help("Quantization offset from QASYMM8");
139 input_npy->set_help("Use input .npy instead");
140 output_npy->set_help("Use .npy as a reference");
141 input_range_low->set_help("Lower bound for input randomization range");
142 input_range_high->set_help("Lower bound for input randomization range");
143 weights_scale->set_help("Quantization scale from QASYMM8");
144 weights_offset->set_help("Quantization offset from QASYMM8");
145 weights_range_low->set_help("Lower bound for input randomization range");
146 weights_range_high->set_help("Lower bound for input randomization range");
147 depth_multiplier->set_help("Depth multiplier");
148 }
149
150 /** Fill out the supplied parameters with user supplied parameters
151 *
152 * @param[out] os Output stream.
153 * @param[in] common_params Example parameters to output
154 *
155 * @return None.
156 */
157 void consume_parameters(ExampleParams &common_params)
158 {
Georgios Pinitas4c5469b2019-05-21 13:32:43 +0100159 common_params.input.width = width->value();
160 common_params.input.height = height->value();
161 common_params.input.fm = channels->value();
162 common_params.input.batch = batch->value();
163 common_params.input.quant_info = QuantizationInfo(scale->value(), offset->value());
164 common_params.input.npy = input_npy->value();
165 common_params.input.range_low = input_range_low->value();
166 common_params.input.range_high = input_range_high->value();
John Kesapides8d942692019-02-26 14:52:12 +0000167
Georgios Pinitas4c5469b2019-05-21 13:32:43 +0100168 common_params.weights.width = weights_width->value();
169 common_params.weights.height = weights_height->value();
170 common_params.weights.npy = weights_npy->value();
171 common_params.weights.range_low = weights_range_low->value();
172 common_params.weights.range_high = weights_range_high->value();
173 common_params.weights.quant_info = QuantizationInfo(weights_scale->value(), weights_offset->value());
John Kesapides8d942692019-02-26 14:52:12 +0000174
175 common_params.bias.npy = bias_npy->value();
176
Georgios Pinitas4c5469b2019-05-21 13:32:43 +0100177 common_params.output.quant_info = QuantizationInfo(output_scale->value(), output_offset->value());
178 common_params.output.npy = output_npy->value();
John Kesapides8d942692019-02-26 14:52:12 +0000179
180 common_params.convolution.padding_mode = padding_mode->value();
181 common_params.convolution.padding_top = padding_top->value();
182 common_params.convolution.padding_bottom = padding_bottom->value();
183 common_params.convolution.padding_left = padding_left->value();
184 common_params.convolution.padding_right = padding_right->value();
185 common_params.convolution.padding_stride_x = stride_x->value();
186 common_params.convolution.padding_stride_y = stride_y->value();
187 common_params.convolution.depth_multiplier = depth_multiplier->value();
188
189 common_params.data_type = data_type->value();
190 common_params.data_layout = data_layout->value();
191 common_params.depth_convolution_method = conv_mode->value();
192 }
193
194 void print_parameters(::std::ostream &os, const ExampleParams &common_params) override
195 {
196 os << "Threads : " << common_params.common_params.threads << std::endl;
197 os << "Target : " << common_params.common_params.target << std::endl;
198 os << "Data type : " << common_params.data_type << std::endl;
199 os << "Input dimensions(X,Y, Channels, Batch) : (" << common_params.input.width << "," << common_params.input.height << "," << common_params.input.fm << "," << common_params.input.batch << ")"
200 << std::endl;
201 os << "Weight dimensions(X,Y, Channels(same as input)) : (" << common_params.weights.width << "," << common_params.weights.height << "," << common_params.input.fm << ","
202 << ")" << std::endl;
203 os << "Padding(top, bottom, left, right) (stride x, stride y) : (" << common_params.convolution.padding_top << "," << common_params.convolution.padding_bottom << "," <<
204 common_params.convolution.padding_left << "," << common_params.convolution.padding_right << ") (" << common_params.convolution.padding_stride_x << "," << common_params.convolution.padding_stride_y <<
205 ")" << std::endl;
206 os << "Padding Mode: " << common_params.convolution.padding_mode << std::endl;
207 os << "Convolution Method: " << common_params.depth_convolution_method << std::endl;
208 os << "Depth multiplier: " << common_params.convolution.depth_multiplier;
209 }
210
211 /** Prevent instances of this class from being copied (As this class contains pointers) */
212 DepthConvolutionOptions(const DepthConvolutionOptions &) = delete;
213 /** Prevent instances of this class from being copied (As this class contains pointers) */
214 DepthConvolutionOptions &operator=(const DepthConvolutionOptions &) = delete;
215 /** Allow instances of this class to be moved */
216 DepthConvolutionOptions(DepthConvolutionOptions &&) noexcept(true) = default;
217 /** Allow instances of this class to be moved */
218 DepthConvolutionOptions &operator=(DepthConvolutionOptions &&) noexcept(true) = default;
219 /** Default destructor */
220 ~DepthConvolutionOptions() override = default;
221
Michalis Spyroubcfd09a2019-05-01 13:03:59 +0100222private:
John Kesapides8d942692019-02-26 14:52:12 +0000223 SimpleOption<int> *width; /**< Input width */
224 SimpleOption<int> *height; /**< Input height */
225 SimpleOption<int> *channels; /**< Input channels */
226 SimpleOption<int> *batch; /**< Input batch */
227 SimpleOption<int> *weights_width; /**< weights width */
228 SimpleOption<int> *weights_height; /**< weights height */
229 SimpleOption<int> *padding_top; /**< Padding top */
230 SimpleOption<int> *padding_left; /**< Padding left */
231 SimpleOption<int> *padding_bottom; /**< Padding bottom */
232 SimpleOption<int> *padding_right; /**< Padding right */
233 SimpleOption<int> *stride_x; /**< Padding stride x */
234 SimpleOption<int> *stride_y; /**< Padding stride y */
235 EnumOption<ConvolutionPaddingMode> *padding_mode; /**< Padding mode */
236 EnumOption<arm_compute::graph::DepthwiseConvolutionMethod> *conv_mode; /**< Convolution method */
237 SimpleOption<int> *depth_multiplier; /**< Depth multiplier */
238 EnumOption<arm_compute::DataLayout> *data_layout; /**< Graph data layout */
239 SimpleOption<float> *scale; /**< Input Quantization scale from QASYMM8 */
240 SimpleOption<int> *offset; /**< Input Quantization offset from QASYMM8 */
241 SimpleOption<float> *weights_scale; /**< Weights Quantization scale from QASYMM8 */
242 SimpleOption<int> *weights_offset; /**< Weights Quantization offset from QASYMM8 */
243 SimpleOption<float> *output_scale; /**< Output Quantization scale from QASYMM8 */
244 SimpleOption<int> *output_offset; /**< Output Quantization offset from QASYMM8 */
245 SimpleOption<uint64_t> *input_range_low; /**< Lower bound for input randomization range */
246 SimpleOption<uint64_t> *input_range_high; /**< Upper bound for input randomization range */
247 SimpleOption<uint64_t> *weights_range_low; /**< Lower bound for weights randomization range */
248 SimpleOption<uint64_t> *weights_range_high; /**< Upper bound for weights randomization range */
249
250 SimpleOption<std::string> *input_npy; /**< Use input .npy image */
251 SimpleOption<std::string> *output_npy; /**< Use output .npy image to verify*/
252 SimpleOption<std::string> *weights_npy; /**< Use weights .npy image */
253 SimpleOption<std::string> *bias_npy; /**< Use bias .npy image */
254};
255
256/** DepthwiseConvolutionLayer Graph example validation accessor class */
257template <typename D>
258class DepthConvolutionVerifyAccessor final : public VerifyAccessor<D>
259{
260public:
261 using BaseClassType = VerifyAccessor<D>;
262 using BaseClassType::BaseClassType;
263 using BaseClassType::_params;
264 using TBias = typename std::conditional<std::is_same<typename std::decay<D>::type, uint8_t>::value, int32_t, D>::type;
265
266public:
267 SimpleTensor<D> reference(SimpleTensor<D> &src, SimpleTensor<D> &weights, SimpleTensor<TBias> &bias, const TensorShape &output_shape) override
268 {
269 // Calculate padding information
270 const PadStrideInfo padding_info = calculate_convolution_padding(_params);
271
272 //Calculate reference
273 return reference::depthwise_convolution<D>(src, weights, bias, output_shape, padding_info,
274 _params.convolution.depth_multiplier,
275 Size2D(1U, 1U),
276 _params.output.quant_info);
277 }
278
279 float relative_tolerance() override
280 {
281 const std::map<arm_compute::graph::Target, const std::map<DataType, float>> relative_tolerance
282 {
283 {
284 arm_compute::graph::Target::CL,
285 { { DataType::F16, 0.01f },
286 { DataType::F32, 0.01f },
287 { DataType::QASYMM8, 0.0f }
288 }
289 },
290 {
291 arm_compute::graph::Target::NEON,
292 { { DataType::F16, 0.01f },
293 { DataType::F32, 0.01f },
294 { DataType::QASYMM8, 1.0f }
295 }
296 }
297 };
298
299 return relative_tolerance.at(_params.common_params.target).at(_params.data_type);
300 }
301
302 float absolute_tolerance() override
303 {
304 const std::map<Target, const std::map<DataType, float>> absolute_tolerance
305 {
306 {
307 Target::CL,
308 { { DataType::F16, 0.0f },
309 { DataType::F32, 0.0000f },
310 { DataType::QASYMM8, 0.0f }
311 }
312 },
313 {
314 Target::NEON,
315 { { DataType::F16, 0.2f },
316 { DataType::F32, 0.002f },
317 { DataType::QASYMM8, 0.0f }
318 }
319 }
320 };
321
322 return absolute_tolerance.at(_params.common_params.target).at(_params.data_type);
323 }
324
325 float tolerance_number() override
326 {
327 const std::map<Target, const std::map<DataType, float>> absolute_tolerance
328 {
329 {
330 Target::CL,
331 { { DataType::F16, 0.05f },
332 { DataType::F32, 0.00f },
333 { DataType::QASYMM8, 0.0f }
334 }
335 },
336 {
337 Target::NEON,
338 { { DataType::F16, 0.05f },
339 { DataType::F32, 0.0f },
340 { DataType::QASYMM8, 0.0f }
341 }
342 }
343 };
344
345 return absolute_tolerance.at(_params.common_params.target).at(_params.data_type);
346 }
347};
348
349} // namespace
350
351class GraphDepthwiseConvolutionValidateExample final : public GraphValidateExample<DepthwiseConvolutionLayer, DepthConvolutionOptions, DepthConvolutionVerifyAccessor>
352{
353 using GraphValidateExample::graph;
354
355public:
356 GraphDepthwiseConvolutionValidateExample()
357 : GraphValidateExample("DepthWiseConvolution Graph example")
358 {
359 }
360
361 DepthwiseConvolutionLayer GraphFunctionLayer(ExampleParams &params) override
362 {
363 const PixelValue lower = PixelValue(params.input.range_low, params.data_type, params.input.quant_info);
364 const PixelValue upper = PixelValue(params.input.range_high, params.data_type, params.input.quant_info);
365
366 const PixelValue weights_lower = PixelValue(params.weights.range_low, params.data_type, params.weights.quant_info);
367 const PixelValue weights_upper = PixelValue(params.weights.range_high, params.data_type, params.weights.quant_info);
368
369 // Calculate padding information
370 const PadStrideInfo padding_info = calculate_convolution_padding(params);
371
372 return DepthwiseConvolutionLayer(params.weights.width, params.weights.height,
373 get_accessor(params.weights, weights_lower, weights_upper, 1),
374 get_accessor(params.bias, lower, upper, 2),
375 padding_info, params.convolution.depth_multiplier, params.weights.quant_info, params.output.quant_info);
376 }
377};
378
/** Main program for Graph Depthwise Convolution test
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments ( Input dimensions [width, height, channels, batch]
 *                             Weights dimensions [width, height, channels]
 *                             Padding [top, bottom, left, right, Stride x, Stride y, mode [Valid / Same / Manual] )
 *                             Convolution Method [ Default / GEMV / Optimized3x3 ]
 *                             Verification [ tolerance_number, absolute_tolerance, relative_tolerance ] )
 *
 * @return Program exit code as returned by the example runner
 */
int main(int argc, char **argv)
{
    // Delegate option parsing, graph construction and validation to the framework runner.
    return arm_compute::utils::run_example<GraphDepthwiseConvolutionValidateExample>(argc, argv);
}