/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/Nodes.h"
#include "support/ToolchainSupport.h"
#include "utils/GraphUtils.h"
#include "utils/Utils.h"

#include <array>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
using namespace arm_compute::utils;
using namespace arm_compute::graph;
using namespace arm_compute::graph_utils;

/** Example demonstrating how to implement Microsoft's ResNet50 network using the Compute Library's graph API
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels )
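 *
 * Example invocations (the binary name and data path are illustrative):
 *   ./graph_resnet50                                        -> NEON target, random weights and input
 *   ./graph_resnet50 1 /path/to/data                        -> OpenCL target, real weights, random input
 *   ./graph_resnet50 1 /path/to/data image.ppm labels.txt   -> full run, prints the top 5 predictions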
 */
class GraphResNet50Example : public Example
{
public:
    void do_setup(int argc, char **argv) override
    {
        std::string data_path; /* Path to the trainable data */
        std::string image;     /* Image data */
        std::string label;     /* Label data */

        // Create a preprocessor: it subtracts the per-channel RGB means below from the input image (Caffe-style preprocessing).
        // Note: "CaffePreproccessor" (sic) is the identifier as spelled in the library.
        const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
        std::unique_ptr<IPreprocessor> preprocessor = arm_compute::support::cpp14::make_unique<CaffePreproccessor>(mean_rgb,
                                                                                                                   false /* Do not convert to BGR */);

        // Set target: 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). Defaults to NEON.
        const int  int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
        TargetHint target_hint     = set_target_hint(int_target_hint);

        // Parse arguments
        if(argc < 2)
        {
            // Print help
            std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels]\n\n";
            std::cout << "No data folder provided: using random values\n\n";
        }
        else if(argc == 2)
        {
            std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels]\n\n";
            std::cout << "No data folder provided: using random values\n\n";
        }
        else if(argc == 3)
        {
            data_path = argv[2];
            std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels]\n\n";
            std::cout << "No image provided: using random values\n\n";
        }
        else if(argc == 4)
        {
            data_path = argv[2];
            image     = argv[3];
            std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels]\n\n";
            std::cout << "No text file with labels provided: skipping output accessor\n\n";
        }
        else
        {
            data_path = argv[2];
            image     = argv[3];
            label     = argv[4];
        }

        // Initialize the graph; passing true enables the OpenCL tuner (selected with target hint 2)
        graph.graph_init(int_target_hint == 2);

        graph << target_hint
              << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
                        get_input_accessor(image, std::move(preprocessor), false /* Do not convert to BGR */))
              << ConvolutionLayer(
                  7U, 7U, 64U,
                  get_weights_accessor(data_path, "/cnn_data/resnet50_model/conv1_weights.npy"),
                  std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                  PadStrideInfo(2, 2, 3, 3))
              << BatchNormalizationLayer(
                  get_weights_accessor(data_path, "/cnn_data/resnet50_model/conv1_BatchNorm_moving_mean.npy"),
                  get_weights_accessor(data_path, "/cnn_data/resnet50_model/conv1_BatchNorm_moving_variance.npy"),
                  get_weights_accessor(data_path, "/cnn_data/resnet50_model/conv1_BatchNorm_gamma.npy"),
                  get_weights_accessor(data_path, "/cnn_data/resnet50_model/conv1_BatchNorm_beta.npy"),
                  0.0000100099996416f)
              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
              << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR)));

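        // The four stages below use the 3, 4, 6 and 3 bottleneck units of ResNet50; each unit
        // expands to four times its base depth, so the stages output 256, 512, 1024 and 2048 channels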
        add_residual_block(data_path, "block1", 64, 3, 2);
        add_residual_block(data_path, "block2", 128, 4, 2);
        add_residual_block(data_path, "block3", 256, 6, 2);
        add_residual_block(data_path, "block4", 512, 3, 1);

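        // Classifier head: global average pooling, a 1x1 convolution producing the 1000 class logits,
        // then flatten and softmax; the output accessor prints the top 5 predictions when labels are given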
        graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG))
              << ConvolutionLayer(
                  1U, 1U, 1000U,
                  get_weights_accessor(data_path, "/cnn_data/resnet50_model/logits_weights.npy"),
                  get_weights_accessor(data_path, "/cnn_data/resnet50_model/logits_biases.npy"),
                  PadStrideInfo(1, 1, 0, 0))
              << FlattenLayer()
              << SoftmaxLayer()
              << Tensor(get_output_accessor(label, 5));
    }
    void do_run() override
    {
        // Run graph
        graph.run();
    }

private:
    Graph graph{};

    void add_residual_block(const std::string &data_path, const std::string &name, unsigned int base_depth, unsigned int num_units, unsigned int stride)
    {
        for(unsigned int i = 0; i < num_units; ++i)
        {
            std::stringstream unit;
            unit << "/cnn_data/resnet50_model/" << name << "_unit_" << (i + 1) << "_bottleneck_v1_";
            std::string unit_name = unit.str();

            unsigned int middle_stride = 1;

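            // The downsampling stride is applied in the last unit of the stage; this mirrors the
            // TF-Slim resnet_v1 layout, which the weight file names also appear to follow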
            if(i == (num_units - 1))
            {
                middle_stride = stride;
            }

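            // Right branch: the 1x1 -> 3x3 -> 1x1 bottleneck. The 3x3 convolution carries the
            // stride and the final 1x1 expands the output to four times the base depth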
            SubGraph right;
            right << ConvolutionLayer(
                      1U, 1U, base_depth,
                      get_weights_accessor(data_path, unit_name + "conv1_weights.npy"),
                      std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                      PadStrideInfo(1, 1, 0, 0))
                  << BatchNormalizationLayer(
                      get_weights_accessor(data_path, unit_name + "conv1_BatchNorm_moving_mean.npy"),
                      get_weights_accessor(data_path, unit_name + "conv1_BatchNorm_moving_variance.npy"),
                      get_weights_accessor(data_path, unit_name + "conv1_BatchNorm_gamma.npy"),
                      get_weights_accessor(data_path, unit_name + "conv1_BatchNorm_beta.npy"),
                      0.0000100099996416f)
                  << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))

                  << ConvolutionLayer(
                      3U, 3U, base_depth,
                      get_weights_accessor(data_path, unit_name + "conv2_weights.npy"),
                      std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                      PadStrideInfo(middle_stride, middle_stride, 1, 1))
                  << BatchNormalizationLayer(
                      get_weights_accessor(data_path, unit_name + "conv2_BatchNorm_moving_mean.npy"),
                      get_weights_accessor(data_path, unit_name + "conv2_BatchNorm_moving_variance.npy"),
                      get_weights_accessor(data_path, unit_name + "conv2_BatchNorm_gamma.npy"),
                      get_weights_accessor(data_path, unit_name + "conv2_BatchNorm_beta.npy"),
                      0.0000100099996416f)
                  << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))

                  << ConvolutionLayer(
                      1U, 1U, base_depth * 4,
                      get_weights_accessor(data_path, unit_name + "conv3_weights.npy"),
                      std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                      PadStrideInfo(1, 1, 0, 0))
                  << BatchNormalizationLayer(
                      get_weights_accessor(data_path, unit_name + "conv3_BatchNorm_moving_mean.npy"),
                      get_weights_accessor(data_path, unit_name + "conv3_BatchNorm_moving_variance.npy"),
                      get_weights_accessor(data_path, unit_name + "conv3_BatchNorm_gamma.npy"),
                      get_weights_accessor(data_path, unit_name + "conv3_BatchNorm_beta.npy"),
                      0.0000100099996416f);

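            // Left branch (shortcut): the first unit of a stage projects the input with a 1x1
            // convolution plus batch norm to match the expanded depth; a strided later unit
            // downsamples with a 1x1 max pool; otherwise the input is passed through unchanged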
            if(i == 0)
            {
                SubGraph left;
                left << ConvolutionLayer(
                         1U, 1U, base_depth * 4,
                         get_weights_accessor(data_path, unit_name + "shortcut_weights.npy"),
                         std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
                         PadStrideInfo(1, 1, 0, 0))
                     << BatchNormalizationLayer(
                         get_weights_accessor(data_path, unit_name + "shortcut_BatchNorm_moving_mean.npy"),
                         get_weights_accessor(data_path, unit_name + "shortcut_BatchNorm_moving_variance.npy"),
                         get_weights_accessor(data_path, unit_name + "shortcut_BatchNorm_gamma.npy"),
                         get_weights_accessor(data_path, unit_name + "shortcut_BatchNorm_beta.npy"),
                         0.0000100099996416f);

                graph << ResidualLayer(std::move(left), std::move(right));
            }
            else if(middle_stride > 1)
            {
                SubGraph left;
                left << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 1, PadStrideInfo(middle_stride, middle_stride, 0, 0), true))
                     // TODO (alegil01) : Remove once we understand why a single node graph does not run in CL
                     << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));

                graph << ResidualLayer(std::move(left), std::move(right));
            }
            else
            {
                graph << ResidualLayer(std::move(right));
            }

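            // The ResidualLayer adds the two branches element-wise; a ReLU follows the addition,
            // as in the original ResNet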
            graph << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        }
    }
};

/** Main program for ResNet50
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels )
 */
int main(int argc, char **argv)
{
    return arm_compute::utils::run_example<GraphResNet50Example>(argc, argv);
}