/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"

using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;

/** Example demonstrating how to implement ResNet12 network using the Compute Library's graph API */
class GraphResNet12Example : public Example
{
public:
    GraphResNet12Example()
        : cmd_parser(), common_opts(cmd_parser), model_input_width(nullptr), model_input_height(nullptr), common_params(), graph(0, "ResNet12")
    {
        model_input_width  = cmd_parser.add_option<SimpleOption<unsigned int>>("image-width", 192);
        model_input_height = cmd_parser.add_option<SimpleOption<unsigned int>>("image-height", 128);

        // Set help text for the input dimension options
        model_input_width->set_help("Input image width.");
        model_input_height->set_help("Input image height.");
    }
    GraphResNet12Example(const GraphResNet12Example &) = delete;
    GraphResNet12Example &operator=(const GraphResNet12Example &) = delete;
    GraphResNet12Example(GraphResNet12Example &&) = default; // NOLINT
    GraphResNet12Example &operator=(GraphResNet12Example &&) = default; // NOLINT
    ~GraphResNet12Example() override = default;
    bool do_setup(int argc, char **argv) override
    {
        // Parse arguments
        cmd_parser.parse(argc, argv);

        // Consume common parameters
        common_params = consume_common_graph_parameters(common_opts);

        // Return when help menu is requested
        if(common_params.help)
        {
            cmd_parser.print_help(argv[0]);
            return false;
        }

        // Get input image width and height
        const unsigned int image_width  = model_input_width->value();
        const unsigned int image_height = model_input_height->value();

        // Checks
        ARM_COMPUTE_EXIT_ON_MSG(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type), "QASYMM8 not supported for this graph");

        // Print parameter values
        std::cout << common_params << std::endl;
        std::cout << "Image width: " << image_width << std::endl;
        std::cout << "Image height: " << image_height << std::endl;

        // Get trainable parameters data path
        const std::string data_path  = common_params.data_path;
        const std::string model_path = "/cnn_data/resnet12_model/";

        // Create a preprocessor object
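        // TFPreproccessor rescales the input pixels in TensorFlow style (by default to the [-1, 1] range)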
        std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<TFPreproccessor>();

        // Create input descriptor
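        // The shape is given in NCHW terms (width, height, channels, batch) and permuted to the data layout requested on the command line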
        const TensorShape tensor_shape     = permute_shape(TensorShape(image_width, image_height, 3U, 1U), DataLayout::NCHW, common_params.data_layout);
        TensorDescriptor  input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);

        // Set weights trained layout
        const DataLayout weights_layout = DataLayout::NCHW;

        graph << common_params.target
              << common_params.fast_math_hint
              << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor), false /* Do not convert to BGR */))
              << ConvolutionLayer(
                  9U, 9U, 64U,
                  get_weights_accessor(data_path, "conv1_weights.npy", weights_layout),
                  get_weights_accessor(data_path, "conv1_biases.npy", weights_layout),
                  PadStrideInfo(1, 1, 4, 4))
              .set_name("conv1/convolution")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv1/Relu");

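        // Stack four residual blocks; each adds two 3x3 conv/BatchNorm/ReLU stages to an identity shortcut (see add_residual_block below)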
        add_residual_block(data_path, "block1", weights_layout);
        add_residual_block(data_path, "block2", weights_layout);
        add_residual_block(data_path, "block3", weights_layout);
        add_residual_block(data_path, "block4", weights_layout);

        graph << ConvolutionLayer(
                  3U, 3U, 64U,
                  get_weights_accessor(data_path, "conv10_weights.npy", weights_layout),
                  get_weights_accessor(data_path, "conv10_biases.npy"),
                  PadStrideInfo(1, 1, 1, 1))
              .set_name("conv10/convolution")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv10/Relu")
              << ConvolutionLayer(
                  3U, 3U, 64U,
                  get_weights_accessor(data_path, "conv11_weights.npy", weights_layout),
                  get_weights_accessor(data_path, "conv11_biases.npy"),
                  PadStrideInfo(1, 1, 1, 1))
              .set_name("conv11/convolution")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("conv11/Relu")
              << ConvolutionLayer(
                  9U, 9U, 3U,
                  get_weights_accessor(data_path, "conv12_weights.npy", weights_layout),
                  get_weights_accessor(data_path, "conv12_biases.npy"),
                  PadStrideInfo(1, 1, 4, 4))
              .set_name("conv12/convolution")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH)).set_name("conv12/Tanh")
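              // LINEAR activation computes f(x) = a * x + b; with a = 0.58 and b = 0.5 it rescales the tanh output from [-1, 1] towards the image value range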
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 0.58f, 0.5f)).set_name("conv12/Linear")
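              // No reference output is checked in this example; a dummy accessor simply consumes the result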
              << OutputLayer(arm_compute::support::cpp14::make_unique<DummyAccessor>(0));

        // Finalize graph
        GraphConfig config;
        config.num_threads = common_params.threads;
        config.use_tuner   = common_params.enable_tuner;
        graph.finalize(common_params.target, config);

        return true;
    }

    void do_run() override
    {
        // Run graph
        graph.run();
    }

private:
    CommandLineParser           cmd_parser;
    CommonGraphOptions          common_opts;
    SimpleOption<unsigned int> *model_input_width{ nullptr };
    SimpleOption<unsigned int> *model_input_height{ nullptr };
    CommonGraphParams           common_params;
    Stream                      graph;

    void add_residual_block(const std::string &data_path, const std::string &name, DataLayout weights_layout)
    {
        std::stringstream unit_path_ss;
        unit_path_ss << data_path << name << "_";
        std::stringstream unit_name_ss;
        unit_name_ss << name << "/";

        std::string unit_path = unit_path_ss.str();
        std::string unit_name = unit_name_ss.str();

        SubStream left(graph);
        SubStream right(graph);
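        // The left sub-stream carries the identity shortcut unchanged; the right sub-stream applies the two conv/BatchNorm/ReLU stages below before the element-wise addition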

        right << ConvolutionLayer(
                  3U, 3U, 64U,
                  get_weights_accessor(data_path, unit_path + "conv1_weights.npy", weights_layout),
                  get_weights_accessor(data_path, unit_path + "conv1_biases.npy", weights_layout),
                  PadStrideInfo(1, 1, 1, 1))
              .set_name(unit_name + "conv1/convolution")
              << BatchNormalizationLayer(
                  get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_moving_mean.npy"),
                  get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_moving_variance.npy"),
                  get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_gamma.npy"),
                  get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_beta.npy"),
                  0.0000100099996416f)
              .set_name(unit_name + "conv1/BatchNorm")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "conv1/Relu")

              << ConvolutionLayer(
                  3U, 3U, 64U,
                  get_weights_accessor(data_path, unit_path + "conv2_weights.npy", weights_layout),
                  get_weights_accessor(data_path, unit_path + "conv2_biases.npy", weights_layout),
                  PadStrideInfo(1, 1, 1, 1))
              .set_name(unit_name + "conv2/convolution")
              << BatchNormalizationLayer(
                  get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_moving_mean.npy"),
                  get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_moving_variance.npy"),
                  get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_gamma.npy"),
                  get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_beta.npy"),
                  0.0000100099996416f)
              .set_name(unit_name + "conv2/BatchNorm")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "conv2/Relu");

        graph << EltwiseLayer(std::move(left), std::move(right), EltwiseOperation::Add).set_name(unit_name + "add");
    }
};

/** Main program for ResNet12
 *
 * Model is based on:
 *      https://arxiv.org/pdf/1709.01118.pdf
 *      "WESPE: Weakly Supervised Photo Enhancer for Digital Cameras"
 *      Andrey Ignatov, Nikolay Kobyshev, Kenneth Vanhoey, Radu Timofte, Luc Van Gool
 *
 * @note To list all the possible arguments execute the binary appended with the --help option
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments
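 *
 * Example invocation (the data path shown is illustrative; see --help for the full list of options):
 *   ./graph_resnet12 --data=/path/to/weights/ --image-width=192 --image-height=128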
 */
int main(int argc, char **argv)
{
    return arm_compute::utils::run_example<GraphResNet12Example>(argc, argv);
}