blob: 8443e9c97eaf1bd0ff5455f2f31e358d3dd7c2da [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
2 * Copyright (c) 2017 ARM Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__
25#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__
26
#include "TensorLibrary.h"
#include "Utils.h"

#include <array>
#include <memory>
#include <string>
#include <vector>
31
32using namespace arm_compute;
33using namespace arm_compute::test;
34
35namespace arm_compute
36{
37namespace test
38{
39namespace model_objects
40{
41/** AlexNet model object */
42template <typename ITensorType,
43 typename TensorType,
44 typename SubTensorType,
45 typename Accessor,
46 typename ActivationLayerFunction,
47 typename ConvolutionLayerFunction,
48 typename FullyConnectedLayerFunction,
49 typename NormalizationLayerFunction,
50 typename PoolingLayerFunction,
51 typename SoftmaxLayerFunction,
52 DataType dt = DataType::F32,
53 int fixed_point_position = 4>
54class AlexNet
55{
56public:
57 AlexNet()
58 : _batches(1), _reshaped_weights(false)
59 {
60 }
61
62 void init_weights(unsigned int batches, bool reshaped_weights = false)
63 {
64 _batches = batches;
65 _reshaped_weights = reshaped_weights;
66
67 // Initialize weights and biases
68 if(!_reshaped_weights)
69 {
70 for(auto &wi : w)
71 {
72 wi = std::unique_ptr<TensorType>(new TensorType());
73 }
74 for(auto &bi : b)
75 {
76 bi = std::unique_ptr<TensorType>(new TensorType());
77 }
78 w[0]->allocator()->init(TensorInfo(TensorShape(11U, 11U, 3U, 96U), 1, dt, fixed_point_position));
79 b[0]->allocator()->init(TensorInfo(TensorShape(96U), 1, dt, fixed_point_position));
80 w[1]->allocator()->init(TensorInfo(TensorShape(5U, 5U, 48U, 256U), 1, dt, fixed_point_position));
81 b[1]->allocator()->init(TensorInfo(TensorShape(256U), 1, dt, fixed_point_position));
82 w[2]->allocator()->init(TensorInfo(TensorShape(3U, 3U, 256U, 384U), 1, dt, fixed_point_position));
83 b[2]->allocator()->init(TensorInfo(TensorShape(384U), 1, dt, fixed_point_position));
84 w[3]->allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 384U), 1, dt, fixed_point_position));
85 b[3]->allocator()->init(TensorInfo(TensorShape(384U), 1, dt, fixed_point_position));
86 w[4]->allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 256U), 1, dt, fixed_point_position));
87 b[4]->allocator()->init(TensorInfo(TensorShape(256U), 1, dt, fixed_point_position));
88 w[5]->allocator()->init(TensorInfo(TensorShape(9216U, 4096U), 1, dt, fixed_point_position));
89 b[5]->allocator()->init(TensorInfo(TensorShape(4096U), 1, dt, fixed_point_position));
90 w[6]->allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, dt, fixed_point_position));
91 b[6]->allocator()->init(TensorInfo(TensorShape(4096U), 1, dt, fixed_point_position));
92 w[7]->allocator()->init(TensorInfo(TensorShape(4096U, 1000U), 1, dt, fixed_point_position));
93 b[7]->allocator()->init(TensorInfo(TensorShape(1000U), 1, dt, fixed_point_position));
94
95 w21 = std::unique_ptr<SubTensorType>(new SubTensorType(w[1].get(), TensorShape(5U, 5U, 48U, 128U), Coordinates()));
96 w22 = std::unique_ptr<SubTensorType>(new SubTensorType(w[1].get(), TensorShape(5U, 5U, 48U, 128U), Coordinates(0, 0, 0, 128)));
97 b21 = std::unique_ptr<SubTensorType>(new SubTensorType(b[1].get(), TensorShape(128U), Coordinates()));
98 b22 = std::unique_ptr<SubTensorType>(new SubTensorType(b[1].get(), TensorShape(128U), Coordinates(128)));
99
100 w41 = std::unique_ptr<SubTensorType>(new SubTensorType(w[3].get(), TensorShape(3U, 3U, 192U, 192U), Coordinates()));
101 w42 = std::unique_ptr<SubTensorType>(new SubTensorType(w[3].get(), TensorShape(3U, 3U, 192U, 192U), Coordinates(0, 0, 0, 192)));
102 b41 = std::unique_ptr<SubTensorType>(new SubTensorType(b[3].get(), TensorShape(192U), Coordinates()));
103 b42 = std::unique_ptr<SubTensorType>(new SubTensorType(b[3].get(), TensorShape(192U), Coordinates(192)));
104
105 w51 = std::unique_ptr<SubTensorType>(new SubTensorType(w[4].get(), TensorShape(3U, 3U, 192U, 128U), Coordinates()));
106 w52 = std::unique_ptr<SubTensorType>(new SubTensorType(w[4].get(), TensorShape(3U, 3U, 192U, 128U), Coordinates(0, 0, 0, 128)));
107 b51 = std::unique_ptr<SubTensorType>(new SubTensorType(b[4].get(), TensorShape(128U), Coordinates()));
108 b52 = std::unique_ptr<SubTensorType>(new SubTensorType(b[4].get(), TensorShape(128U), Coordinates(128)));
109 }
110 else
111 {
112 const unsigned int dt_size = 16 / arm_compute::data_size_from_type(dt);
113
114 // Create tensor for the reshaped weights
115 w[0] = std::unique_ptr<TensorType>(new TensorType());
116 auto w21_tensor = std::unique_ptr<TensorType>(new TensorType());
117 auto w22_tensor = std::unique_ptr<TensorType>(new TensorType());
118 w[2] = std::unique_ptr<TensorType>(new TensorType());
119 auto w41_tensor = std::unique_ptr<TensorType>(new TensorType());
120 auto w42_tensor = std::unique_ptr<TensorType>(new TensorType());
121 auto w51_tensor = std::unique_ptr<TensorType>(new TensorType());
122 auto w52_tensor = std::unique_ptr<TensorType>(new TensorType());
123
124 w[0]->allocator()->init(TensorInfo(TensorShape(366U * dt_size, 96U / dt_size), 1, dt, fixed_point_position));
125 w21_tensor->allocator()->init(TensorInfo(TensorShape(1248U * dt_size, 128U / dt_size), 1, dt, fixed_point_position));
126 w22_tensor->allocator()->init(TensorInfo(TensorShape(1248U * dt_size, 128U / dt_size), 1, dt, fixed_point_position));
127 w[2]->allocator()->init(TensorInfo(TensorShape(2560U * dt_size, 384U / dt_size), 1, dt, fixed_point_position));
128 w41_tensor->allocator()->init(TensorInfo(TensorShape(1920U * dt_size, 192U / dt_size), 1, dt, fixed_point_position));
129 w42_tensor->allocator()->init(TensorInfo(TensorShape(1920U * dt_size, 192U / dt_size), 1, dt, fixed_point_position));
130 w51_tensor->allocator()->init(TensorInfo(TensorShape(1920U * dt_size, 128U / dt_size), 1, dt, fixed_point_position));
131 w52_tensor->allocator()->init(TensorInfo(TensorShape(1920U * dt_size, 128U / dt_size), 1, dt, fixed_point_position));
132
133 w21 = std::move(w21_tensor);
134 w22 = std::move(w22_tensor);
135 w41 = std::move(w41_tensor);
136 w42 = std::move(w42_tensor);
137 w51 = std::move(w51_tensor);
138 w52 = std::move(w52_tensor);
139
140 w[5] = std::unique_ptr<TensorType>(new TensorType());
141 w[6] = std::unique_ptr<TensorType>(new TensorType());
142 w[7] = std::unique_ptr<TensorType>(new TensorType());
143 b[5] = std::unique_ptr<TensorType>(new TensorType());
144 b[6] = std::unique_ptr<TensorType>(new TensorType());
145 b[7] = std::unique_ptr<TensorType>(new TensorType());
146
147 b[5]->allocator()->init(TensorInfo(TensorShape(4096U), 1, dt, fixed_point_position));
148 b[6]->allocator()->init(TensorInfo(TensorShape(4096U), 1, dt, fixed_point_position));
149 b[7]->allocator()->init(TensorInfo(TensorShape(1000U), 1, dt, fixed_point_position));
150
151 if(_batches > 1)
152 {
153 w[5]->allocator()->init(TensorInfo(TensorShape(9216U * dt_size, 4096U / dt_size), 1, dt, fixed_point_position));
154 w[6]->allocator()->init(TensorInfo(TensorShape(4096U * dt_size, 4096U / dt_size), 1, dt, fixed_point_position));
155 w[7]->allocator()->init(TensorInfo(TensorShape(4096U * dt_size, 1000U / dt_size), 1, dt, fixed_point_position));
156 }
157 else
158 {
159 w[5]->allocator()->init(TensorInfo(TensorShape(4096U, 9216U), 1, dt, fixed_point_position));
160 w[6]->allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, dt, fixed_point_position));
161 w[7]->allocator()->init(TensorInfo(TensorShape(1000U, 4096U), 1, dt, fixed_point_position));
162 }
163 }
164 }
165
166 void build()
167 {
168 input.allocator()->init(TensorInfo(TensorShape(227U, 227U, 3U, _batches), 1, dt, fixed_point_position));
169 output.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, dt, fixed_point_position));
170
171 // Initialize intermediate tensors
172 // Layer 1
173 conv1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, dt, fixed_point_position));
174 act1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, dt, fixed_point_position));
175 norm1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, dt, fixed_point_position));
176 pool1_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 96U, _batches), 1, dt, fixed_point_position));
177 pool11_out = std::unique_ptr<SubTensorType>(new SubTensorType(&pool1_out, TensorShape(27U, 27U, 48U, _batches), Coordinates()));
178 pool12_out = std::unique_ptr<SubTensorType>(new SubTensorType(&pool1_out, TensorShape(27U, 27U, 48U, _batches), Coordinates(0, 0, 48)));
179 // Layer 2
180 conv2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, dt, fixed_point_position));
181 conv21_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv2_out, TensorShape(27U, 27U, 128U, _batches), Coordinates()));
182 conv22_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv2_out, TensorShape(27U, 27U, 128U, _batches), Coordinates(0, 0, 128)));
183 act2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, dt, fixed_point_position));
184 norm2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, dt, fixed_point_position));
185 pool2_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, dt, fixed_point_position));
186 // Layer 3
187 conv3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, dt, fixed_point_position));
188 act3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, dt, fixed_point_position));
189 act31_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act3_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
190 act32_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act3_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
191 // Layer 4
192 conv4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, dt, fixed_point_position));
193 conv41_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
194 conv42_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
195 act4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, dt, fixed_point_position));
196 act41_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
197 act42_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
198 // Layer 5
199 conv5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, dt, fixed_point_position));
200 conv51_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv5_out, TensorShape(13U, 13U, 128U, _batches), Coordinates()));
201 conv52_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv5_out, TensorShape(13U, 13U, 128U, _batches), Coordinates(0, 0, 128)));
202 act5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, dt, fixed_point_position));
203 pool5_out.allocator()->init(TensorInfo(TensorShape(6U, 6U, 256U, _batches), 1, dt, fixed_point_position));
204 // Layer 6
205 fc6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, dt, fixed_point_position));
206 act6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, dt, fixed_point_position));
207 // Layer 7
208 fc7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, dt, fixed_point_position));
209 act7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, dt, fixed_point_position));
210 // Layer 8
211 fc8_out.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, dt, fixed_point_position));
212
213 // Allocate layers
214 {
215 // Layer 1
216 conv1 = std::unique_ptr<ConvolutionLayerFunction>(new ConvolutionLayerFunction());
217 act1 = std::unique_ptr<ActivationLayerFunction>(new ActivationLayerFunction());
218 norm1 = std::unique_ptr<NormalizationLayerFunction>(new NormalizationLayerFunction());
219 pool1 = std::unique_ptr<PoolingLayerFunction>(new PoolingLayerFunction());
220 // Layer 2
221 conv21 = std::unique_ptr<ConvolutionLayerFunction>(new ConvolutionLayerFunction());
222 conv22 = std::unique_ptr<ConvolutionLayerFunction>(new ConvolutionLayerFunction());
223 act2 = std::unique_ptr<ActivationLayerFunction>(new ActivationLayerFunction());
224 norm2 = std::unique_ptr<NormalizationLayerFunction>(new NormalizationLayerFunction());
225 pool2 = std::unique_ptr<PoolingLayerFunction>(new PoolingLayerFunction());
226 // Layer 3
227 conv3 = std::unique_ptr<ConvolutionLayerFunction>(new ConvolutionLayerFunction());
228 act3 = std::unique_ptr<ActivationLayerFunction>(new ActivationLayerFunction());
229 // Layer 4
230 conv41 = std::unique_ptr<ConvolutionLayerFunction>(new ConvolutionLayerFunction());
231 conv42 = std::unique_ptr<ConvolutionLayerFunction>(new ConvolutionLayerFunction());
232 act4 = std::unique_ptr<ActivationLayerFunction>(new ActivationLayerFunction());
233 // Layer 5
234 conv51 = std::unique_ptr<ConvolutionLayerFunction>(new ConvolutionLayerFunction());
235 conv52 = std::unique_ptr<ConvolutionLayerFunction>(new ConvolutionLayerFunction());
236 act5 = std::unique_ptr<ActivationLayerFunction>(new ActivationLayerFunction());
237 pool5 = std::unique_ptr<PoolingLayerFunction>(new PoolingLayerFunction());
238 // Layer 6
239 fc6 = std::unique_ptr<FullyConnectedLayerFunction>(new FullyConnectedLayerFunction());
240 act6 = std::unique_ptr<ActivationLayerFunction>(new ActivationLayerFunction());
241 // Layer 7
242 fc7 = std::unique_ptr<FullyConnectedLayerFunction>(new FullyConnectedLayerFunction());
243 act7 = std::unique_ptr<ActivationLayerFunction>(new ActivationLayerFunction());
244 // Layer 8
245 fc8 = std::unique_ptr<FullyConnectedLayerFunction>(new FullyConnectedLayerFunction());
246 // Softmax
247 smx = std::unique_ptr<SoftmaxLayerFunction>(new SoftmaxLayerFunction());
248 }
249
250 // Configure Layers
251 {
252 // Layer 1
Gian Marco Iodice4e288692017-06-27 11:41:59 +0100253 conv1->configure(&input, w[0].get(), b[0].get(), &conv1_out, PadStrideInfo(4, 4, 0, 0), WeightsInfo(_reshaped_weights, 11U, 11U));
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100254 act1->configure(&conv1_out, &act1_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
255 norm1->configure(&act1_out, &norm1_out, NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f));
256 pool1->configure(&norm1_out, &pool1_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
257 // Layer 2
Gian Marco Iodice4e288692017-06-27 11:41:59 +0100258 conv21->configure(pool11_out.get(), w21.get(), b21.get(), conv21_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U, 5U));
259 conv22->configure(pool12_out.get(), w22.get(), b22.get(), conv22_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U, 5U));
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100260 act2->configure(&conv2_out, &act2_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
261 norm2->configure(&act2_out, &norm2_out, NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f));
262 pool2->configure(&norm2_out, &pool2_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
263 // Layer 3
Gian Marco Iodice4e288692017-06-27 11:41:59 +0100264 conv3->configure(&pool2_out, w[2].get(), b[2].get(), &conv3_out, PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100265 act3->configure(&conv3_out, &act3_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
266 // Layer 4
Gian Marco Iodice4e288692017-06-27 11:41:59 +0100267 conv41->configure(act31_out.get(), w41.get(), b41.get(), conv41_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
268 conv42->configure(act32_out.get(), w42.get(), b42.get(), conv42_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100269 act4->configure(&conv4_out, &act4_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
270 // Layer 5
Gian Marco Iodice4e288692017-06-27 11:41:59 +0100271 conv51->configure(act41_out.get(), w51.get(), b51.get(), conv51_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
272 conv52->configure(act42_out.get(), w52.get(), b52.get(), conv52_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100273 act5->configure(&conv5_out, &act5_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
274 pool5->configure(&act5_out, &pool5_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
275 // Layer 6
276 fc6->configure(&pool5_out, w[5].get(), b[5].get(), &fc6_out, true, _reshaped_weights);
277 act6->configure(&fc6_out, &act6_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
278 // Layer 7
279 fc7->configure(&act6_out, w[6].get(), b[6].get(), &fc7_out, true, _reshaped_weights);
280 act7->configure(&fc7_out, &act7_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
281 // Layer 8
282 fc8->configure(&act7_out, w[7].get(), b[7].get(), &fc8_out, true, _reshaped_weights);
283 // Softmax
284 smx->configure(&fc8_out, &output);
285 }
286 }
287
288 void allocate()
289 {
290 input.allocator()->allocate();
291 output.allocator()->allocate();
292 for(auto &wi : w)
293 {
294 if(wi.get())
295 {
296 wi->allocator()->allocate();
297 }
298 }
299 for(auto &bi : b)
300 {
301 if(bi.get())
302 {
303 bi->allocator()->allocate();
304 }
305 }
306 if(_reshaped_weights)
307 {
308 dynamic_cast<TensorType *>(w21.get())->allocator()->allocate();
309 dynamic_cast<TensorType *>(w22.get())->allocator()->allocate();
310 dynamic_cast<TensorType *>(w41.get())->allocator()->allocate();
311 dynamic_cast<TensorType *>(w42.get())->allocator()->allocate();
312 dynamic_cast<TensorType *>(w51.get())->allocator()->allocate();
313 dynamic_cast<TensorType *>(w52.get())->allocator()->allocate();
314 }
315 conv1_out.allocator()->allocate();
316 act1_out.allocator()->allocate();
317 norm1_out.allocator()->allocate();
318 pool1_out.allocator()->allocate();
319 conv2_out.allocator()->allocate();
320 act2_out.allocator()->allocate();
321 norm2_out.allocator()->allocate();
322 pool2_out.allocator()->allocate();
323 conv3_out.allocator()->allocate();
324 act3_out.allocator()->allocate();
325 conv4_out.allocator()->allocate();
326 act4_out.allocator()->allocate();
327 conv5_out.allocator()->allocate();
328 act5_out.allocator()->allocate();
329 pool5_out.allocator()->allocate();
330 fc6_out.allocator()->allocate();
331 act6_out.allocator()->allocate();
332 fc7_out.allocator()->allocate();
333 act7_out.allocator()->allocate();
334 fc8_out.allocator()->allocate();
335 }
336
337 /** Fills the trainable parameters and input with random data. */
338 void fill_random()
339 {
340 library->fill_tensor_uniform(Accessor(input), 0);
341 if(!_reshaped_weights)
342 {
343 for(unsigned int i = 0; i < w.size(); ++i)
344 {
345 library->fill_tensor_uniform(Accessor(*w[i]), i + 1);
346 library->fill_tensor_uniform(Accessor(*b[i]), i + 10);
347 }
348 }
349 else
350 {
351 library->fill_tensor_uniform(Accessor(*w[0]), 1);
352 library->fill_tensor_uniform(Accessor(*w[2]), 2);
353
354 library->fill_tensor_uniform(Accessor(*w[5]), 3);
355 library->fill_tensor_uniform(Accessor(*b[5]), 4);
356 library->fill_tensor_uniform(Accessor(*w[6]), 5);
357 library->fill_tensor_uniform(Accessor(*b[6]), 6);
358 library->fill_tensor_uniform(Accessor(*w[7]), 7);
359 library->fill_tensor_uniform(Accessor(*b[7]), 8);
360
361 library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w21.get())), 9);
362 library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w22.get())), 10);
363 library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w41.get())), 11);
364 library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w42.get())), 12);
365 library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w51.get())), 13);
366 library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w52.get())), 14);
367 }
368 }
369
370#ifdef INTERNAL_ONLY
371 /** Fills the trainable parameters from binary files
372 *
373 * @param weights Files names containing the weights data
374 * @param biases Files names containing the bias data
375 */
376 void fill(std::vector<std::string> weights, std::vector<std::string> biases)
377 {
378 ARM_COMPUTE_ERROR_ON(weights.size() != w.size());
379 ARM_COMPUTE_ERROR_ON(biases.size() != b.size());
380 ARM_COMPUTE_ERROR_ON(_reshaped_weights);
381
382 for(unsigned int i = 0; i < weights.size(); ++i)
383 {
384 library->fill_layer_data(Accessor(*w[i]), weights[i]);
385 library->fill_layer_data(Accessor(*b[i]), biases[i]);
386 }
387 }
388
389 /** Feed input to network from file.
390 *
391 * @param name File name of containing the input data.
392 */
393 void feed(std::string name)
394 {
395 library->fill_layer_data(Accessor(input), name);
396 }
397#endif /* INTERNAL_ONLY */
398
399 /** Get the classification results.
400 *
401 * @return Vector containing the classified labels
402 */
403 std::vector<unsigned int> get_classifications()
404 {
405 std::vector<unsigned int> classified_labels;
406 Accessor output_accessor(output);
407
408 Window window;
409 window.set(Window::DimX, Window::Dimension(0, 1, 1));
410 for(unsigned int d = 1; d < output_accessor.shape().num_dimensions(); ++d)
411 {
412 window.set(d, Window::Dimension(0, output_accessor.shape()[d], 1));
413 }
414
415 execute_window_loop(window, [&](const Coordinates & id)
416 {
417 int max_idx = 0;
418 float val = 0;
419 const void *const out_ptr = output_accessor(id);
420 for(unsigned int l = 0; l < output_accessor.shape().x(); ++l)
421 {
422 float curr_val = reinterpret_cast<const float *>(out_ptr)[l];
423 if(curr_val > val)
424 {
425 max_idx = l;
426 val = curr_val;
427 }
428 }
429 classified_labels.push_back(max_idx);
430 });
431 return classified_labels;
432 }
433
434 /** Clear all allocated memory from the tensor objects */
435 void clear()
436 {
437 conv1.reset();
438 act1.reset();
439 norm1.reset();
440 pool1.reset();
441 conv21.reset();
442 conv22.reset();
443 act2.reset();
444 norm2.reset();
445 pool2.reset();
446 conv3.reset();
447 act3.reset();
448 conv41.reset();
449 conv42.reset();
450 act4.reset();
451 conv51.reset();
452 conv52.reset();
453 act5.reset();
454 pool5.reset();
455 fc6.reset();
456 act6.reset();
457 fc7.reset();
458 act7.reset();
459 fc8.reset();
460 smx.reset();
461
462 // Free allocations
463 input.allocator()->free();
464 output.allocator()->free();
465 for(auto &wi : w)
466 {
467 wi.reset();
468 }
469 for(auto &bi : b)
470 {
471 bi.reset();
472 }
473
474 w21.reset();
475 w22.reset();
476 b21.reset();
477 b21.reset();
478 w41.reset();
479 w42.reset();
480 b41.reset();
481 b42.reset();
482 w51.reset();
483 w52.reset();
484 b51.reset();
485 b52.reset();
486
487 conv1_out.allocator()->free();
488 act1_out.allocator()->free();
489 norm1_out.allocator()->free();
490 pool1_out.allocator()->free();
491 conv2_out.allocator()->free();
492 act2_out.allocator()->free();
493 norm2_out.allocator()->free();
494 pool2_out.allocator()->free();
495 conv3_out.allocator()->free();
496 act3_out.allocator()->free();
497 conv4_out.allocator()->free();
498 act4_out.allocator()->free();
499 conv5_out.allocator()->free();
500 act5_out.allocator()->free();
501 pool5_out.allocator()->free();
502 fc6_out.allocator()->free();
503 act6_out.allocator()->free();
504 fc7_out.allocator()->free();
505 act7_out.allocator()->free();
506 fc8_out.allocator()->free();
507 }
508
509 /** Runs the model */
510 void run()
511 {
512 // Layer 1
513 conv1->run();
514 act1->run();
515 norm1->run();
516 pool1->run();
517 // Layer 2
518 conv21->run();
519 conv22->run();
520 act2->run();
521 norm2->run();
522 pool2->run();
523 // Layer 3
524 conv3->run();
525 act3->run();
526 // Layer 4
527 conv41->run();
528 conv42->run();
529 act4->run();
530 // Layer 5
531 conv51->run();
532 conv52->run();
533 act5->run();
534 pool5->run();
535 // Layer 6
536 fc6->run();
537 act6->run();
538 // Layer 7
539 fc7->run();
540 act7->run();
541 // Layer 8
542 fc8->run();
543 // Softmax
544 smx->run();
545 }
546
547private:
548 unsigned int _batches;
549 bool _reshaped_weights;
550
551 std::unique_ptr<ActivationLayerFunction> act1{ nullptr }, act2{ nullptr }, act3{ nullptr }, act4{ nullptr }, act5{ nullptr }, act6{ nullptr }, act7{ nullptr };
552 std::unique_ptr<ConvolutionLayerFunction> conv1{ nullptr }, conv21{ nullptr }, conv22{ nullptr }, conv3{ nullptr }, conv41{ nullptr }, conv42{ nullptr }, conv51{ nullptr }, conv52{ nullptr };
553 std::unique_ptr<FullyConnectedLayerFunction> fc6{ nullptr }, fc7{ nullptr }, fc8{};
554 std::unique_ptr<NormalizationLayerFunction> norm1{ nullptr }, norm2{ nullptr };
555 std::unique_ptr<PoolingLayerFunction> pool1{ nullptr }, pool2{ nullptr }, pool5{ nullptr };
556 std::unique_ptr<SoftmaxLayerFunction> smx{ nullptr };
557
558 TensorType input{}, output{};
559 std::array<std::unique_ptr<TensorType>, 8> w{}, b{};
560 std::unique_ptr<ITensorType> w21{ nullptr }, w22{ nullptr }, b21{ nullptr }, b22{ nullptr };
561 std::unique_ptr<ITensorType> w41{ nullptr }, w42{ nullptr }, b41{ nullptr }, b42{ nullptr };
562 std::unique_ptr<ITensorType> w51{ nullptr }, w52{ nullptr }, b51{ nullptr }, b52{ nullptr };
563
564 TensorType conv1_out{}, act1_out{}, norm1_out{}, pool1_out{};
565 TensorType conv2_out{}, act2_out{}, pool2_out{}, norm2_out{};
566 TensorType conv3_out{}, act3_out{};
567 TensorType conv4_out{}, act4_out{};
568 TensorType conv5_out{}, act5_out{}, pool5_out{};
569 TensorType fc6_out{}, act6_out{};
570 TensorType fc7_out{}, act7_out{};
571 TensorType fc8_out{};
572
573 std::unique_ptr<SubTensorType> pool11_out{ nullptr }, pool12_out{ nullptr };
574 std::unique_ptr<SubTensorType> conv21_out{ nullptr }, conv22_out{ nullptr };
575 std::unique_ptr<SubTensorType> act31_out{ nullptr }, act32_out{ nullptr };
576 std::unique_ptr<SubTensorType> conv41_out{ nullptr }, conv42_out{ nullptr }, act41_out{ nullptr }, act42_out{ nullptr };
577 std::unique_ptr<SubTensorType> conv51_out{ nullptr }, conv52_out{ nullptr };
578};
579} // namespace model_objects
580} // namespace test
581} // namespace arm_compute
582#endif //__ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__