blob: 1bc8ad9a0c1b0d3a7d8376a68863691f65703768 [file] [log] [blame]
Georgios Pinitas93dcd832017-10-27 12:48:20 +01001/*
2 * Copyright (c) 2017 ARM Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_MOBILENET_H__
25#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_MOBILENET_H__
26
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/Utils.h"

#include "utils/Utils.h"

#include <limits>
#include <memory>
34
35using namespace arm_compute;
36using namespace arm_compute::test;
37
38namespace arm_compute
39{
40namespace test
41{
42namespace networks
43{
44/** MobileNet model object */
45template <typename TensorType,
46 typename Accessor,
47 typename ActivationLayerFunction,
48 typename ConvolutionLayerFunction,
49 typename DirectConvolutionLayerFunction,
Giorgio Arena04a8f8c2017-11-23 11:45:24 +000050 typename DepthwiseConvolutionLayerFunction,
Georgios Pinitas93dcd832017-10-27 12:48:20 +010051 typename ReshapeFunction,
52 typename PoolingLayerFunction>
53class MobileNetNetwork
54{
55public:
56 void init(int batches)
57 {
58 _batches = batches;
59
60 // Initialize input, output
61 input.allocator()->init(TensorInfo(TensorShape(224U, 224U, 3U, _batches), 1, DataType::F32));
62 output.allocator()->init(TensorInfo(TensorShape(11U, _batches), 1, DataType::F32));
63 // Initialize weights and biases
64 w_conv3x3.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32));
65 b_conv3x3.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
66 depthwise_conv_block_init(0, 16, 16);
67 depthwise_conv_block_init(1, 16, 32);
68 depthwise_conv_block_init(2, 32, 32);
69 depthwise_conv_block_init(3, 32, 64);
70 depthwise_conv_block_init(4, 64, 64);
71 depthwise_conv_block_init(5, 64, 128);
72 depthwise_conv_block_init(6, 128, 128);
73 depthwise_conv_block_init(7, 128, 128);
74 depthwise_conv_block_init(8, 128, 128);
75 depthwise_conv_block_init(9, 128, 128);
76 depthwise_conv_block_init(10, 128, 128);
77 depthwise_conv_block_init(11, 128, 256);
78 depthwise_conv_block_init(12, 256, 256);
79 w_conv[13].allocator()->init(TensorInfo(TensorShape(1U, 1U, 256U, 11U), 1, DataType::F32));
80 b_conv[13].allocator()->init(TensorInfo(TensorShape(11U), 1, DataType::F32));
81 }
82
83 /** Build the model. */
84 void build()
85 {
86 // Configure Layers
87 conv3x3.configure(&input, &w_conv3x3, &b_conv3x3, &conv_out[0], PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR));
88 conv3x3_act.configure(&conv_out[0], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
89 depthwise_conv_block_build(0, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0));
90 depthwise_conv_block_build(1, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
91 depthwise_conv_block_build(2, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
92 depthwise_conv_block_build(3, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
93 depthwise_conv_block_build(4, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
94 depthwise_conv_block_build(5, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
95 depthwise_conv_block_build(6, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
96 depthwise_conv_block_build(7, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
97 depthwise_conv_block_build(8, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
98 depthwise_conv_block_build(9, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
99 depthwise_conv_block_build(10, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
100 depthwise_conv_block_build(11, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
101 depthwise_conv_block_build(12, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
102 pool.configure(&conv_out[13], &pool_out, PoolingLayerInfo(PoolingType::AVG, 7, PadStrideInfo(2, 2, 0, 0)));
103 conv1x1[13].configure(&pool_out, &w_conv[13], &b_conv[13], &conv_out[14], PadStrideInfo(1, 1, 0, 0));
104 logistic.configure(&conv_out[14], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
105 reshape.configure(&conv_out[14], &output);
106 }
107
108 void allocate()
109 {
110 input.allocator()->allocate();
111 output.allocator()->allocate();
112
113 w_conv3x3.allocator()->allocate();
114 b_conv3x3.allocator()->allocate();
115 for(unsigned int i = 0; i < w_conv.size(); ++i)
116 {
117 w_conv[i].allocator()->allocate();
118 b_conv[i].allocator()->allocate();
119 }
120 for(unsigned int i = 0; i < w_dwc.size(); ++i)
121 {
122 w_dwc[i].allocator()->allocate();
123 b_dwc[i].allocator()->allocate();
124 }
125 for(auto &o : conv_out)
126 {
127 o.allocator()->allocate();
128 }
129 for(auto &o : dwc_out)
130 {
131 o.allocator()->allocate();
132 }
133 pool_out.allocator()->allocate();
134 }
135
136 /** Fills the trainable parameters and input with random data. */
137 void fill_random()
138 {
139 unsigned int seed_idx = 0;
140 std::uniform_real_distribution<> distribution(-1, 1);
141 library->fill(Accessor(input), distribution, seed_idx++);
142
143 library->fill(Accessor(w_conv3x3), distribution, seed_idx++);
144 library->fill(Accessor(b_conv3x3), distribution, seed_idx++);
145 for(unsigned int i = 0; i < w_conv.size(); ++i)
146 {
147 library->fill(Accessor(w_conv[i]), distribution, seed_idx++);
148 library->fill(Accessor(b_conv[i]), distribution, seed_idx++);
149 }
150 for(unsigned int i = 0; i < w_dwc.size(); ++i)
151 {
152 library->fill(Accessor(w_dwc[i]), distribution, seed_idx++);
153 library->fill(Accessor(b_dwc[i]), distribution, seed_idx++);
154 }
155 }
156
157 /** Feed input to network from file.
158 *
159 * @param name File name of containing the input data.
160 */
161 void feed(std::string name)
162 {
163 library->fill_layer_data(Accessor(input), name);
164 }
165
166 /** Get the classification results.
167 *
168 * @return Vector containing the classified labels
169 */
170 std::vector<unsigned int> get_classifications()
171 {
172 std::vector<unsigned int> classified_labels;
173 Accessor output_accessor(output);
174
175 Window window;
176 window.set(Window::DimX, Window::Dimension(0, 1, 1));
177 for(unsigned int d = 1; d < output_accessor.shape().num_dimensions(); ++d)
178 {
179 window.set(d, Window::Dimension(0, output_accessor.shape()[d], 1));
180 }
181
182 execute_window_loop(window, [&](const Coordinates & id)
183 {
184 int max_idx = 0;
185 float val = 0;
186 const void *const out_ptr = output_accessor(id);
187 for(unsigned int l = 0; l < output_accessor.shape().x(); ++l)
188 {
189 float curr_val = reinterpret_cast<const float *>(out_ptr)[l];
190 if(curr_val > val)
191 {
192 max_idx = l;
193 val = curr_val;
194 }
195 }
196 classified_labels.push_back(max_idx);
197 });
198 return classified_labels;
199 }
200
201 /** Clear all allocated memory from the tensor objects */
202 void clear()
203 {
204 input.allocator()->free();
205 output.allocator()->free();
206
207 w_conv3x3.allocator()->free();
208 b_conv3x3.allocator()->free();
209 for(unsigned int i = 0; i < w_conv.size(); ++i)
210 {
211 w_conv[i].allocator()->free();
212 b_conv[i].allocator()->free();
213 }
214 for(unsigned int i = 0; i < w_dwc.size(); ++i)
215 {
216 w_dwc[i].allocator()->free();
217 b_dwc[i].allocator()->free();
218 }
219 for(auto &o : conv_out)
220 {
221 o.allocator()->free();
222 }
223 for(auto &o : dwc_out)
224 {
225 o.allocator()->free();
226 }
227 pool_out.allocator()->free();
228 }
229
230 /** Runs the model */
231 void run()
232 {
233 conv3x3.run();
234 conv3x3_act.run();
235 depthwise_conv_block_run(0);
236 depthwise_conv_block_run(1);
237 depthwise_conv_block_run(2);
238 depthwise_conv_block_run(3);
239 depthwise_conv_block_run(4);
240 depthwise_conv_block_run(5);
241 depthwise_conv_block_run(6);
242 depthwise_conv_block_run(7);
243 depthwise_conv_block_run(8);
244 depthwise_conv_block_run(9);
245 depthwise_conv_block_run(10);
246 depthwise_conv_block_run(11);
247 depthwise_conv_block_run(12);
248 pool.run();
249 conv1x1[13].run();
250 logistic.run();
251 reshape.run();
252 }
253
254private:
255 void depthwise_conv_block_init(unsigned int idx, unsigned int ifm, unsigned int ofm)
256 {
257 w_dwc[idx].allocator()->init(TensorInfo(TensorShape(3U, 3U, ifm), 1, DataType::F32));
258 b_dwc[idx].allocator()->init(TensorInfo(TensorShape(ifm), 1, DataType::F32));
259 w_conv[idx].allocator()->init(TensorInfo(TensorShape(1U, 1U, ifm, ofm), 1, DataType::F32));
260 b_conv[idx].allocator()->init(TensorInfo(TensorShape(ofm), 1, DataType::F32));
261 }
262 void depthwise_conv_block_build(unsigned int idx, PadStrideInfo dwc_ps, PadStrideInfo conv_ps)
263 {
264 dwc3x3[idx].configure(&conv_out[idx], &w_dwc[idx], &b_dwc[idx], &dwc_out[idx], dwc_ps);
265 act[2 * idx].configure(&dwc_out[idx], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
266 conv1x1[idx].configure(&dwc_out[idx], &w_conv[idx], &b_conv[idx], &conv_out[idx + 1], conv_ps);
267 act[2 * idx + 1].configure(&conv_out[idx], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
268 }
269 void depthwise_conv_block_run(unsigned int idx)
270 {
271 dwc3x3[idx].run();
272 act[2 * idx].run();
273 conv1x1[idx].run();
274 act[2 * idx + 1].run();
275 }
276
277private:
278 unsigned int _batches{ 0 };
279
280 ConvolutionLayerFunction conv3x3{};
281 ActivationLayerFunction conv3x3_act{};
Giorgio Arena04a8f8c2017-11-23 11:45:24 +0000282 std::array<ActivationLayerFunction, 26> act{ {} };
283 std::array<DirectConvolutionLayerFunction, 14> conv1x1{ {} };
284 std::array<DepthwiseConvolutionLayerFunction, 13> dwc3x3{ {} };
Georgios Pinitas93dcd832017-10-27 12:48:20 +0100285 PoolingLayerFunction pool{};
286 ActivationLayerFunction logistic{};
287 ReshapeFunction reshape{};
288
289 TensorType w_conv3x3{}, b_conv3x3{};
290 std::array<TensorType, 14> w_conv{ {} }, b_conv{ {} };
291 std::array<TensorType, 13> w_dwc{ {} }, b_dwc{ {} };
292
293 TensorType input{}, output{};
294
295 std::array<TensorType, 15> conv_out{ {} };
296 std::array<TensorType, 13> dwc_out{ {} };
297 TensorType pool_out{};
298};
299} // namespace networks
300} // namespace test
301} // namespace arm_compute
302#endif //__ARM_COMPUTE_TEST_MODEL_OBJECTS_MOBILENET_H__