/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_MOBILENET_H__
#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_MOBILENET_H__

#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/Utils.h"

#include "utils/Utils.h"

#include <array>
#include <memory>
#include <random>
#include <string>
#include <vector>

using namespace arm_compute;
using namespace arm_compute::test;

namespace arm_compute
{
namespace test
{
namespace networks
{
/** MobileNet model object */
template <typename TensorType,
          typename Accessor,
          typename ActivationLayerFunction,
          typename ConvolutionLayerFunction,
          typename DirectConvolutionLayerFunction,
          typename DepthwiseConvolutionLayerFunction,
          typename ReshapeFunction,
          typename PoolingLayerFunction>
class MobileNetNetwork
{
public:
    /** Initialize the network.
     *
     * @param[in] batches Number of batches.
     */
    void init(int batches)
    {
        _batches = batches;

        // Initialize input, output
        input.allocator()->init(TensorInfo(TensorShape(224U, 224U, 3U, _batches), 1, DataType::F32));
        output.allocator()->init(TensorInfo(TensorShape(11U, _batches), 1, DataType::F32));
        // Initialize weights and biases
        w_conv3x3.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32));
        b_conv3x3.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
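        // Each depthwise-separable block owns a 3x3 depthwise filter/bias with ifm channels and a
        // 1x1 pointwise filter/bias mapping ifm -> ofm channels (see depthwise_conv_block_init()).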
        depthwise_conv_block_init(0, 16, 16);
        depthwise_conv_block_init(1, 16, 32);
        depthwise_conv_block_init(2, 32, 32);
        depthwise_conv_block_init(3, 32, 64);
        depthwise_conv_block_init(4, 64, 64);
        depthwise_conv_block_init(5, 64, 128);
        depthwise_conv_block_init(6, 128, 128);
        depthwise_conv_block_init(7, 128, 128);
        depthwise_conv_block_init(8, 128, 128);
        depthwise_conv_block_init(9, 128, 128);
        depthwise_conv_block_init(10, 128, 128);
        depthwise_conv_block_init(11, 128, 256);
        depthwise_conv_block_init(12, 256, 256);
        w_conv[13].allocator()->init(TensorInfo(TensorShape(1U, 1U, 256U, 11U), 1, DataType::F32));
        b_conv[13].allocator()->init(TensorInfo(TensorShape(11U), 1, DataType::F32));
    }

    /** Build the model. */
    void build()
    {
        // Configure Layers
        conv3x3.configure(&input, &w_conv3x3, &b_conv3x3, &conv_out[0], PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR));
        conv3x3_act.configure(&conv_out[0], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
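        // 13 depthwise-separable blocks; blocks 1, 3, 5 and 11 use stride 2 and halve the spatial dimensions.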
        depthwise_conv_block_build(0, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0));
        depthwise_conv_block_build(1, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
        depthwise_conv_block_build(2, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
        depthwise_conv_block_build(3, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
        depthwise_conv_block_build(4, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
        depthwise_conv_block_build(5, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
        depthwise_conv_block_build(6, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
        depthwise_conv_block_build(7, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
        depthwise_conv_block_build(8, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
        depthwise_conv_block_build(9, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
        depthwise_conv_block_build(10, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
        depthwise_conv_block_build(11, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
        depthwise_conv_block_build(12, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::FLOOR), PadStrideInfo(1, 1, 0, 0));
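        // Classifier head: 7x7 average pooling, 1x1 convolution to 11 classes, logistic activation (in place) and reshape to (11, batches).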
        pool.configure(&conv_out[13], &pool_out, PoolingLayerInfo(PoolingType::AVG, 7, PadStrideInfo(2, 2, 0, 0)));
        conv1x1[13].configure(&pool_out, &w_conv[13], &b_conv[13], &conv_out[14], PadStrideInfo(1, 1, 0, 0));
        logistic.configure(&conv_out[14], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));
        reshape.configure(&conv_out[14], &output);
    }

    /** Allocate the network. */
    void allocate()
    {
        input.allocator()->allocate();
        output.allocator()->allocate();

        w_conv3x3.allocator()->allocate();
        b_conv3x3.allocator()->allocate();
        for(unsigned int i = 0; i < w_conv.size(); ++i)
        {
            w_conv[i].allocator()->allocate();
            b_conv[i].allocator()->allocate();
        }
        for(unsigned int i = 0; i < w_dwc.size(); ++i)
        {
            w_dwc[i].allocator()->allocate();
            b_dwc[i].allocator()->allocate();
        }
        for(auto &o : conv_out)
        {
            o.allocator()->allocate();
        }
        for(auto &o : dwc_out)
        {
            o.allocator()->allocate();
        }
        pool_out.allocator()->allocate();
    }

    /** Fills the trainable parameters and input with random data. */
    void fill_random()
    {
        unsigned int seed_idx = 0;
        std::uniform_real_distribution<> distribution(-1, 1);
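        // Use a different seed offset for each tensor so every fill draws its own random sequence.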
        library->fill(Accessor(input), distribution, seed_idx++);

        library->fill(Accessor(w_conv3x3), distribution, seed_idx++);
        library->fill(Accessor(b_conv3x3), distribution, seed_idx++);
        for(unsigned int i = 0; i < w_conv.size(); ++i)
        {
            library->fill(Accessor(w_conv[i]), distribution, seed_idx++);
            library->fill(Accessor(b_conv[i]), distribution, seed_idx++);
        }
        for(unsigned int i = 0; i < w_dwc.size(); ++i)
        {
            library->fill(Accessor(w_dwc[i]), distribution, seed_idx++);
            library->fill(Accessor(b_dwc[i]), distribution, seed_idx++);
        }
    }

    /** Feed input to the network from a file.
     *
     * @param[in] name Name of the file containing the input data.
     */
    void feed(std::string name)
    {
        library->fill_layer_data(Accessor(input), name);
    }

    /** Get the classification results.
     *
     * @return Vector containing the classified labels
     */
    std::vector<unsigned int> get_classifications()
    {
        std::vector<unsigned int> classified_labels;
        Accessor output_accessor(output);

        Window window;
        window.set(Window::DimX, Window::Dimension(0, 1, 1));
        for(unsigned int d = 1; d < output_accessor.shape().num_dimensions(); ++d)
        {
            window.set(d, Window::Dimension(0, output_accessor.shape()[d], 1));
        }

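        // For every element of the batch, take the index of the maximum value along the class (X) dimension.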
        execute_window_loop(window, [&](const Coordinates & id)
        {
            int               max_idx = 0;
            float             val     = 0;
            const void *const out_ptr = output_accessor(id);
            for(unsigned int l = 0; l < output_accessor.shape().x(); ++l)
            {
                float curr_val = reinterpret_cast<const float *>(out_ptr)[l];
                if(curr_val > val)
                {
                    max_idx = l;
                    val     = curr_val;
                }
            }
            classified_labels.push_back(max_idx);
        });
        return classified_labels;
    }

    /** Clear all allocated memory from the tensor objects */
    void clear()
    {
        input.allocator()->free();
        output.allocator()->free();

        w_conv3x3.allocator()->free();
        b_conv3x3.allocator()->free();
        for(unsigned int i = 0; i < w_conv.size(); ++i)
        {
            w_conv[i].allocator()->free();
            b_conv[i].allocator()->free();
        }
        for(unsigned int i = 0; i < w_dwc.size(); ++i)
        {
            w_dwc[i].allocator()->free();
            b_dwc[i].allocator()->free();
        }
        for(auto &o : conv_out)
        {
            o.allocator()->free();
        }
        for(auto &o : dwc_out)
        {
            o.allocator()->free();
        }
        pool_out.allocator()->free();
    }

    /** Runs the model */
    void run()
    {
        conv3x3.run();
        conv3x3_act.run();
        depthwise_conv_block_run(0);
        depthwise_conv_block_run(1);
        depthwise_conv_block_run(2);
        depthwise_conv_block_run(3);
        depthwise_conv_block_run(4);
        depthwise_conv_block_run(5);
        depthwise_conv_block_run(6);
        depthwise_conv_block_run(7);
        depthwise_conv_block_run(8);
        depthwise_conv_block_run(9);
        depthwise_conv_block_run(10);
        depthwise_conv_block_run(11);
        depthwise_conv_block_run(12);
        pool.run();
        conv1x1[13].run();
        logistic.run();
        reshape.run();
    }

    /** Sync the results */
    void sync()
    {
        sync_if_necessary<TensorType>();
        sync_tensor_if_necessary<TensorType>(output);
    }

private:
    void depthwise_conv_block_init(unsigned int idx, unsigned int ifm, unsigned int ofm)
    {
        w_dwc[idx].allocator()->init(TensorInfo(TensorShape(3U, 3U, ifm), 1, DataType::F32));
        b_dwc[idx].allocator()->init(TensorInfo(TensorShape(ifm), 1, DataType::F32));
        w_conv[idx].allocator()->init(TensorInfo(TensorShape(1U, 1U, ifm, ofm), 1, DataType::F32));
        b_conv[idx].allocator()->init(TensorInfo(TensorShape(ofm), 1, DataType::F32));
    }
    void depthwise_conv_block_build(unsigned int idx, PadStrideInfo dwc_ps, PadStrideInfo conv_ps)
    {
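        // Depthwise-separable block: 3x3 depthwise convolution + ReLU6, then 1x1 pointwise convolution + ReLU6 (activations run in place).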
        dwc3x3[idx].configure(&conv_out[idx], &w_dwc[idx], &b_dwc[idx], &dwc_out[idx], dwc_ps);
        act[2 * idx].configure(&dwc_out[idx], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
        conv1x1[idx].configure(&dwc_out[idx], &w_conv[idx], &b_conv[idx], &conv_out[idx + 1], conv_ps);
        act[2 * idx + 1].configure(&conv_out[idx + 1], nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f));
    }
    void depthwise_conv_block_run(unsigned int idx)
    {
        dwc3x3[idx].run();
        act[2 * idx].run();
        conv1x1[idx].run();
        act[2 * idx + 1].run();
    }

private:
    unsigned int _batches{ 0 };

    ConvolutionLayerFunction conv3x3{};
    ActivationLayerFunction  conv3x3_act{};
    std::array<ActivationLayerFunction, 26>           act{ {} };
    std::array<DirectConvolutionLayerFunction, 14>    conv1x1{ {} };
    std::array<DepthwiseConvolutionLayerFunction, 13> dwc3x3{ {} };
    PoolingLayerFunction    pool{};
    ActivationLayerFunction logistic{};
    ReshapeFunction         reshape{};

    TensorType w_conv3x3{}, b_conv3x3{};
    std::array<TensorType, 14> w_conv{ {} }, b_conv{ {} };
    std::array<TensorType, 13> w_dwc{ {} }, b_dwc{ {} };

    TensorType input{}, output{};

    std::array<TensorType, 15> conv_out{ {} };
    std::array<TensorType, 13> dwc_out{ {} };
    TensorType pool_out{};
};
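/* Illustrative usage sketch only; the NEON function types below and the input file name are
 * assumptions, not part of this header. Any backend that provides matching function objects
 * and a tensor/accessor pair can be substituted.
 *
 *   using MobileNetNeon = MobileNetNetwork<Tensor, Accessor,
 *                                          NEActivationLayer, NEConvolutionLayer,
 *                                          NEDirectConvolutionLayer, NEDepthwiseConvolutionLayer,
 *                                          NEReshapeLayer, NEPoolingLayer>;
 *   MobileNetNeon net;
 *   net.init(1);       // single batch
 *   net.build();
 *   net.allocate();
 *   net.fill_random(); // or net.feed("input_file") to load input data from a (hypothetical) file
 *   net.run();
 *   net.sync();
 *   std::vector<unsigned int> labels = net.get_classifications();
 *   net.clear();
 */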
} // namespace networks
} // namespace test
} // namespace arm_compute
#endif //__ARM_COMPUTE_TEST_MODEL_OBJECTS_MOBILENET_H__