/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph.h"
#include "support/ToolchainSupport.h"
#include "utils/CommonGraphOptions.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"

using namespace arm_compute::utils;
using namespace arm_compute::graph::frontend;
using namespace arm_compute::graph_utils;

/** Example demonstrating how to implement the ResNetV2_50 network using the Compute Library's graph API */
class GraphResNetV2_50Example : public Example
{
public:
    GraphResNetV2_50Example()
        : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "ResNetV2_50")
    {
    }
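    /** Parses the command-line arguments, constructs the ResNetV2_50 graph and finalizes it for the selected target.
     *
     * @return False if only the help menu was requested, true once the graph has been finalized.
     */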
    bool do_setup(int argc, char **argv) override
    {
        // Parse arguments
        cmd_parser.parse(argc, argv);
        cmd_parser.validate();

        // Consume common parameters
        common_params = consume_common_graph_parameters(common_opts);

        // Return when help menu is requested
        if(common_params.help)
        {
            cmd_parser.print_help(argv[0]);
            return false;
        }

        // Print parameter values
        std::cout << common_params << std::endl;

        // Get trainable parameters data path
        std::string data_path  = common_params.data_path;
        std::string model_path = "/cnn_data/resnet_v2_50_model/";
        if(!data_path.empty())
        {
            data_path += model_path;
        }

        // Create a preprocessor object
        std::unique_ptr<IPreprocessor> preprocessor = std::make_unique<TFPreproccessor>();

        // Create input descriptor
        const auto        operation_layout = common_params.data_layout;
        const TensorShape tensor_shape     = permute_shape(TensorShape(224U, 224U, 3U, 1U), DataLayout::NCHW, operation_layout);
        TensorDescriptor  input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(operation_layout);

        // Set weights trained layout
        const DataLayout weights_layout = DataLayout::NCHW;

        graph << common_params.target
              << common_params.fast_math_hint
              << InputLayer(input_descriptor, get_input_accessor(common_params, std::move(preprocessor), false /* Do not convert to BGR */))
              << ConvolutionLayer(
                     7U, 7U, 64U,
                     get_weights_accessor(data_path, "conv1_weights.npy", weights_layout),
                     get_weights_accessor(data_path, "conv1_biases.npy", weights_layout),
                     PadStrideInfo(2, 2, 3, 3))
                 .set_name("conv1/convolution")
              << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, operation_layout, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR))).set_name("pool1/MaxPool");

        add_residual_block(data_path, "block1", weights_layout, 64, 3, 2);
        add_residual_block(data_path, "block2", weights_layout, 128, 4, 2);
        add_residual_block(data_path, "block3", weights_layout, 256, 6, 2);
        add_residual_block(data_path, "block4", weights_layout, 512, 3, 1);

        graph << BatchNormalizationLayer(
                     get_weights_accessor(data_path, "postnorm_moving_mean.npy"),
                     get_weights_accessor(data_path, "postnorm_moving_variance.npy"),
                     get_weights_accessor(data_path, "postnorm_gamma.npy"),
                     get_weights_accessor(data_path, "postnorm_beta.npy"),
                     0.000009999999747378752f)
                 .set_name("postnorm/BatchNorm")
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("postnorm/Relu")
              << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, operation_layout)).set_name("pool5")
              << ConvolutionLayer(
                     1U, 1U, 1001U,
                     get_weights_accessor(data_path, "logits_weights.npy", weights_layout),
                     get_weights_accessor(data_path, "logits_biases.npy"),
                     PadStrideInfo(1, 1, 0, 0))
                 .set_name("logits/convolution")
              << FlattenLayer().set_name("predictions/Reshape")
              << SoftmaxLayer().set_name("predictions/Softmax")
              << OutputLayer(get_output_accessor(common_params, 5));

        // Finalize graph
        GraphConfig config;
        config.num_threads      = common_params.threads;
        config.use_tuner        = common_params.enable_tuner;
        config.tuner_mode       = common_params.tuner_mode;
        config.tuner_file       = common_params.tuner_file;
        config.mlgo_file        = common_params.mlgo_file;
        config.convert_to_uint8 = (common_params.data_type == DataType::QASYMM8);

        graph.finalize(common_params.target, config);

        return true;
    }

    void do_run() override
    {
        // Run graph
        graph.run();
    }

private:
    CommandLineParser  cmd_parser;
    CommonGraphOptions common_opts;
    CommonGraphParams  common_params;
    Stream             graph;

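    /** Appends one ResNetV2 block, i.e. a sequence of pre-activation bottleneck units, to the graph.
     *
     * @param[in] data_path      Path to the trainable parameters (.npy files)
     * @param[in] name           Block name, used to build the weight file prefixes and layer names (e.g. "block1")
     * @param[in] weights_layout Layout the weights were trained in
     * @param[in] base_depth     Depth of the 1x1 and 3x3 bottleneck convolutions; each unit outputs base_depth * 4 channels
     * @param[in] num_units      Number of bottleneck units in the block
     * @param[in] stride         Stride applied by the last unit of the block
     */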
    void add_residual_block(const std::string &data_path, const std::string &name, DataLayout weights_layout,
                            unsigned int base_depth, unsigned int num_units, unsigned int stride)
    {
        for(unsigned int i = 0; i < num_units; ++i)
        {
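            // Each unit follows the pre-activation (v2) layout: a BatchNorm + ReLU "preact" stage feeds a
            // residual branch of 1x1 -> 3x3 -> 1x1 convolutions, while the shortcut branch is either the
            // unit input (max-pooled when the unit is strided) or, when the depth changes, a 1x1 convolution
            // applied to the pre-activated tensor. The two branches are merged by an element-wise addition.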
            // Generate unit names
            std::stringstream unit_path_ss;
            unit_path_ss << name << "_unit_" << (i + 1) << "_bottleneck_v2_";
            std::stringstream unit_name_ss;
            unit_name_ss << name << "/unit" << (i + 1) << "/bottleneck_v2/";

            std::string unit_path = unit_path_ss.str();
            std::string unit_name = unit_name_ss.str();

            const TensorShape last_shape = graph.graph().node(graph.tail_node())->output(0)->desc().shape;
            unsigned int      depth_in   = last_shape[arm_compute::get_data_layout_dimension_index(common_params.data_layout, DataLayoutDimension::CHANNEL)];
            unsigned int      depth_out  = base_depth * 4;

            // All units have stride 1 apart from the last one
            unsigned int middle_stride = (i == (num_units - 1)) ? stride : 1;

            // Preact
            SubStream preact(graph);
            preact << BatchNormalizationLayer(
                          get_weights_accessor(data_path, unit_path + "preact_moving_mean.npy"),
                          get_weights_accessor(data_path, unit_path + "preact_moving_variance.npy"),
                          get_weights_accessor(data_path, unit_path + "preact_gamma.npy"),
                          get_weights_accessor(data_path, unit_path + "preact_beta.npy"),
                          0.000009999999747378752f)
                      .set_name(unit_name + "preact/BatchNorm")
                   << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "preact/Relu");

            // Create shortcut path
            SubStream shortcut(graph);
            if(depth_in == depth_out)
            {
                if(middle_stride != 1)
                {
                    shortcut << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 1, common_params.data_layout, PadStrideInfo(middle_stride, middle_stride, 0, 0), true)).set_name(unit_name + "shortcut/MaxPool");
                }
            }
            else
            {
                shortcut.forward_tail(preact.tail_node());
                shortcut << ConvolutionLayer(
                                1U, 1U, depth_out,
                                get_weights_accessor(data_path, unit_path + "shortcut_weights.npy", weights_layout),
                                get_weights_accessor(data_path, unit_path + "shortcut_biases.npy", weights_layout),
                                PadStrideInfo(1, 1, 0, 0))
                            .set_name(unit_name + "shortcut/convolution");
            }

            // Create residual path
            SubStream residual(preact);
            residual << ConvolutionLayer(
                            1U, 1U, base_depth,
                            get_weights_accessor(data_path, unit_path + "conv1_weights.npy", weights_layout),
                            std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                            PadStrideInfo(1, 1, 0, 0))
                        .set_name(unit_name + "conv1/convolution")
                     << BatchNormalizationLayer(
                            get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_moving_mean.npy"),
                            get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_moving_variance.npy"),
                            get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_gamma.npy"),
                            get_weights_accessor(data_path, unit_path + "conv1_BatchNorm_beta.npy"),
                            0.000009999999747378752f)
                        .set_name(unit_name + "conv1/BatchNorm")
                     << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "conv1/Relu")
                     << ConvolutionLayer(
                            3U, 3U, base_depth,
                            get_weights_accessor(data_path, unit_path + "conv2_weights.npy", weights_layout),
                            std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                            PadStrideInfo(middle_stride, middle_stride, 1, 1))
                        .set_name(unit_name + "conv2/convolution")
                     << BatchNormalizationLayer(
                            get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_moving_mean.npy"),
                            get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_moving_variance.npy"),
                            get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_gamma.npy"),
                            get_weights_accessor(data_path, unit_path + "conv2_BatchNorm_beta.npy"),
                            0.000009999999747378752f)
                        .set_name(unit_name + "conv2/BatchNorm")
                     << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(unit_name + "conv2/Relu")
                     << ConvolutionLayer(
                            1U, 1U, depth_out,
                            get_weights_accessor(data_path, unit_path + "conv3_weights.npy", weights_layout),
                            get_weights_accessor(data_path, unit_path + "conv3_biases.npy", weights_layout),
                            PadStrideInfo(1, 1, 0, 0))
                        .set_name(unit_name + "conv3/convolution");

            graph << EltwiseLayer(std::move(shortcut), std::move(residual), EltwiseOperation::Add).set_name(unit_name + "add");
        }
    }
};

/** Main program for ResNetV2_50
 *
 * Model is based on:
 *      https://arxiv.org/abs/1603.05027
 *      "Identity Mappings in Deep Residual Networks"
 *      Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
 *
 * Provenance: download.tensorflow.org/models/resnet_v2_50_2017_04_14.tar.gz
 *
 * @note To list all the possible arguments execute the binary appended with the --help option
 *
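 * An illustrative invocation (the options come from CommonGraphOptions; run with --help for the exact set):
 *     ./graph_resnet_v2_50 --target=NEON --data=/path/to/cnn_data --image=/path/to/image.ppm --labels=/path/to/labels.txt
 *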
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments
 */
int main(int argc, char **argv)
{
    return arm_compute::utils::run_example<GraphResNetV2_50Example>(argc, argv);
}