/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__
#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__

#include "AssetsLibrary.h"
#include "Globals.h"
#include "Utils.h"

#include <memory>

namespace arm_compute
{
namespace test
{
namespace networks
{
/** AlexNet model object.
 *
 * The compute backend is selected through the template parameters, which name
 * the tensor, sub-tensor, accessor and layer-function types to instantiate.
 */
template <typename ITensorType,
          typename TensorType,
          typename SubTensorType,
          typename Accessor,
          typename ActivationLayerFunction,
          typename ConvolutionLayerFunction,
          typename FullyConnectedLayerFunction,
          typename NormalizationLayerFunction,
          typename PoolingLayerFunction,
          typename SoftmaxLayerFunction>
class AlexNetNetwork
{
public:
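    /** Set the data type, fixed point position, batch size and weights layout,
     *  and initialise the tensor info of the trainable parameters accordingly.
     */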
    void init(DataType data_type, int fixed_point_position, int batches, bool reshaped_weights = false)
    {
        _data_type            = data_type;
        _fixed_point_position = fixed_point_position;
        _batches              = batches;
        _reshaped_weights     = reshaped_weights;

        // Initialize weights and biases
        if(!_reshaped_weights)
        {
            init_weights();
        }
        else
        {
            init_reshaped_weights();
        }
    }

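    /** Create the tensor info for the input, output and intermediate tensors
     *  and configure all layer functions. Call after init().
     */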
    void build()
    {
        input.allocator()->init(TensorInfo(TensorShape(227U, 227U, 3U, _batches), 1, _data_type, _fixed_point_position));
        output.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, _data_type, _fixed_point_position));

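        // AlexNet splits conv2, conv4 and conv5 into two groups. Each group is
        // implemented here as a pair of convolutions operating on sub-tensor
        // views of the shared input/output tensors.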
        // Initialize intermediate tensors
        // Layer 1
        conv1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type, _fixed_point_position));
        act1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type, _fixed_point_position));
        norm1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type, _fixed_point_position));
        pool1_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 96U, _batches), 1, _data_type, _fixed_point_position));
        pool11_out = std::unique_ptr<SubTensorType>(new SubTensorType(&pool1_out, TensorShape(27U, 27U, 48U, _batches), Coordinates()));
        pool12_out = std::unique_ptr<SubTensorType>(new SubTensorType(&pool1_out, TensorShape(27U, 27U, 48U, _batches), Coordinates(0, 0, 48)));
        // Layer 2
        conv2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type, _fixed_point_position));
        conv21_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv2_out, TensorShape(27U, 27U, 128U, _batches), Coordinates()));
        conv22_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv2_out, TensorShape(27U, 27U, 128U, _batches), Coordinates(0, 0, 128)));
        act2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type, _fixed_point_position));
        norm2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type, _fixed_point_position));
        pool2_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type, _fixed_point_position));
        // Layer 3
        conv3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
        act3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
        act31_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act3_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
        act32_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act3_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
        // Layer 4
        conv4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
        conv41_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
        conv42_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
        act4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
        act41_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
        act42_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
        // Layer 5
        conv5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type, _fixed_point_position));
        conv51_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv5_out, TensorShape(13U, 13U, 128U, _batches), Coordinates()));
        conv52_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv5_out, TensorShape(13U, 13U, 128U, _batches), Coordinates(0, 0, 128)));
        act5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type, _fixed_point_position));
        pool5_out.allocator()->init(TensorInfo(TensorShape(6U, 6U, 256U, _batches), 1, _data_type, _fixed_point_position));
        // Layer 6
        fc6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
        act6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
        // Layer 7
        fc7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
        act7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
        // Layer 8
        fc8_out.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, _data_type, _fixed_point_position));

        // Configure Layers
        // Layer 1
        TensorType *b0 = _reshaped_weights ? nullptr : &b[0];
        conv1.configure(&input, &w[0], b0, &conv1_out, PadStrideInfo(4, 4, 0, 0), WeightsInfo(_reshaped_weights, 11U, 11U));
        act1.configure(&conv1_out, &act1_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        norm1.configure(&act1_out, &norm1_out, NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f));
        pool1.configure(&norm1_out, &pool1_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
        // Layer 2
        conv21.configure(pool11_out.get(), w21.get(), b21.get(), conv21_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U, 5U));
        conv22.configure(pool12_out.get(), w22.get(), b22.get(), conv22_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U, 5U));
        act2.configure(&conv2_out, &act2_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        norm2.configure(&act2_out, &norm2_out, NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f));
        pool2.configure(&norm2_out, &pool2_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
        // Layer 3
        TensorType *b2 = _reshaped_weights ? nullptr : &b[2];
        conv3.configure(&pool2_out, &w[2], b2, &conv3_out, PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
        act3.configure(&conv3_out, &act3_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        // Layer 4
        conv41.configure(act31_out.get(), w41.get(), b41.get(), conv41_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
        conv42.configure(act32_out.get(), w42.get(), b42.get(), conv42_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
        act4.configure(&conv4_out, &act4_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        // Layer 5
        conv51.configure(act41_out.get(), w51.get(), b51.get(), conv51_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
        conv52.configure(act42_out.get(), w52.get(), b52.get(), conv52_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U));
        act5.configure(&conv5_out, &act5_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        pool5.configure(&act5_out, &pool5_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
        // Layer 6
        fc6.configure(&pool5_out, &w[5], &b[5], &fc6_out, true, _reshaped_weights);
        act6.configure(&fc6_out, &act6_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        // Layer 7
        fc7.configure(&act6_out, &w[6], &b[6], &fc7_out, true, _reshaped_weights);
        act7.configure(&fc7_out, &act7_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        // Layer 8
        fc8.configure(&act7_out, &w[7], &b[7], &fc8_out, true, _reshaped_weights);
        // Softmax
        smx.configure(&fc8_out, &output);
    }

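    /** Allocate backing memory for all tensors used by the network. */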
    void allocate()
    {
        input.allocator()->allocate();
        output.allocator()->allocate();

        if(!_reshaped_weights)
        {
            for(auto &wi : w)
            {
                wi.allocator()->allocate();
            }

            for(auto &bi : b)
            {
                bi.allocator()->allocate();
            }
        }
        else
        {
            w[0].allocator()->allocate();
            w[2].allocator()->allocate();
            w[5].allocator()->allocate();
            w[6].allocator()->allocate();
            w[7].allocator()->allocate();

            b[5].allocator()->allocate();
            b[6].allocator()->allocate();
            b[7].allocator()->allocate();

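            // The grouped layers' reshaped weights are concrete tensors stored
            // behind ITensorType pointers, so cast back before allocating.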
            dynamic_cast<TensorType *>(w21.get())->allocator()->allocate();
            dynamic_cast<TensorType *>(w22.get())->allocator()->allocate();
            dynamic_cast<TensorType *>(w41.get())->allocator()->allocate();
            dynamic_cast<TensorType *>(w42.get())->allocator()->allocate();
            dynamic_cast<TensorType *>(w51.get())->allocator()->allocate();
            dynamic_cast<TensorType *>(w52.get())->allocator()->allocate();
        }

        conv1_out.allocator()->allocate();
        act1_out.allocator()->allocate();
        norm1_out.allocator()->allocate();
        pool1_out.allocator()->allocate();
        conv2_out.allocator()->allocate();
        act2_out.allocator()->allocate();
        norm2_out.allocator()->allocate();
        pool2_out.allocator()->allocate();
        conv3_out.allocator()->allocate();
        act3_out.allocator()->allocate();
        conv4_out.allocator()->allocate();
        act4_out.allocator()->allocate();
        conv5_out.allocator()->allocate();
        act5_out.allocator()->allocate();
        pool5_out.allocator()->allocate();
        fc6_out.allocator()->allocate();
        act6_out.allocator()->allocate();
        fc7_out.allocator()->allocate();
        act7_out.allocator()->allocate();
        fc8_out.allocator()->allocate();
    }

    /** Fills the trainable parameters and input with random data. */
    void fill_random()
    {
        library->fill_tensor_uniform(Accessor(input), 0);

        if(!_reshaped_weights)
        {
            for(unsigned int i = 0; i < w.size(); ++i)
            {
                library->fill_tensor_uniform(Accessor(w[i]), i + 1);
                library->fill_tensor_uniform(Accessor(b[i]), i + 10);
            }
        }
        else
        {
            library->fill_tensor_uniform(Accessor(w[0]), 1);
            library->fill_tensor_uniform(Accessor(w[2]), 2);

            library->fill_tensor_uniform(Accessor(w[5]), 3);
            library->fill_tensor_uniform(Accessor(b[5]), 4);
            library->fill_tensor_uniform(Accessor(w[6]), 5);
            library->fill_tensor_uniform(Accessor(b[6]), 6);
            library->fill_tensor_uniform(Accessor(w[7]), 7);
            library->fill_tensor_uniform(Accessor(b[7]), 8);

            library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w21.get())), 9);
            library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w22.get())), 10);
            library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w41.get())), 11);
            library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w42.get())), 12);
            library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w51.get())), 13);
            library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w52.get())), 14);
        }
    }

#ifdef INTERNAL_ONLY
    /** Fills the trainable parameters from binary files
     *
     * @param weights Names of the files containing the weights data
     * @param biases  Names of the files containing the bias data
     */
    void fill(std::vector<std::string> weights, std::vector<std::string> biases)
    {
        ARM_COMPUTE_ERROR_ON(weights.size() != w.size());
        ARM_COMPUTE_ERROR_ON(biases.size() != b.size());
        ARM_COMPUTE_ERROR_ON(_reshaped_weights);

        for(unsigned int i = 0; i < weights.size(); ++i)
        {
            library->fill_layer_data(Accessor(w[i]), weights[i]);
            library->fill_layer_data(Accessor(b[i]), biases[i]);
        }
    }

    /** Feed input to the network from a file.
     *
     * @param name Name of the file containing the input data.
     */
    void feed(std::string name)
    {
        library->fill_layer_data(Accessor(input), name);
    }
#endif /* INTERNAL_ONLY */

    /** Get the classification results.
     *
     * @return Vector containing the classified labels
     */
    std::vector<unsigned int> get_classifications()
    {
        std::vector<unsigned int> classified_labels;
        Accessor output_accessor(output);

        Window window;
        window.set(Window::DimX, Window::Dimension(0, 1, 1));
        for(unsigned int d = 1; d < output_accessor.shape().num_dimensions(); ++d)
        {
            window.set(d, Window::Dimension(0, output_accessor.shape()[d], 1));
        }

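        // For each batch item, take the argmax over the class dimension (X).
        // The output is read as float, which assumes an F32 output tensor.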
        execute_window_loop(window, [&](const Coordinates & id)
        {
            int               max_idx = 0;
            float             val     = 0;
            const void *const out_ptr = output_accessor(id);
            for(unsigned int l = 0; l < output_accessor.shape().x(); ++l)
            {
                float curr_val = reinterpret_cast<const float *>(out_ptr)[l];
                if(curr_val > val)
                {
                    max_idx = l;
                    val     = curr_val;
                }
            }
            classified_labels.push_back(max_idx);
        });
        return classified_labels;
    }

    /** Clear all allocated memory from the tensor objects */
    void clear()
    {
        // Free allocations
        input.allocator()->free();
        output.allocator()->free();

        if(!_reshaped_weights)
        {
            for(auto &wi : w)
            {
                wi.allocator()->free();
            }

            for(auto &bi : b)
            {
                bi.allocator()->free();
            }
        }
        else
        {
            w[0].allocator()->free();
            w[2].allocator()->free();
            w[5].allocator()->free();
            w[6].allocator()->free();
            w[7].allocator()->free();

            b[5].allocator()->free();
            b[6].allocator()->free();
            b[7].allocator()->free();
        }

        w21.reset();
        w22.reset();
        b21.reset();
        b22.reset();
        w41.reset();
        w42.reset();
        b41.reset();
        b42.reset();
        w51.reset();
        w52.reset();
        b51.reset();
        b52.reset();

        conv1_out.allocator()->free();
        act1_out.allocator()->free();
        norm1_out.allocator()->free();
        pool1_out.allocator()->free();
        conv2_out.allocator()->free();
        act2_out.allocator()->free();
        norm2_out.allocator()->free();
        pool2_out.allocator()->free();
        conv3_out.allocator()->free();
        act3_out.allocator()->free();
        conv4_out.allocator()->free();
        act4_out.allocator()->free();
        conv5_out.allocator()->free();
        act5_out.allocator()->free();
        pool5_out.allocator()->free();
        fc6_out.allocator()->free();
        act6_out.allocator()->free();
        fc7_out.allocator()->free();
        act7_out.allocator()->free();
        fc8_out.allocator()->free();
    }

    /** Runs the model */
    void run()
    {
        // Layer 1
        conv1.run();
        act1.run();
        norm1.run();
        pool1.run();
        // Layer 2
        conv21.run();
        conv22.run();
        act2.run();
        norm2.run();
        pool2.run();
        // Layer 3
        conv3.run();
        act3.run();
        // Layer 4
        conv41.run();
        conv42.run();
        act4.run();
        // Layer 5
        conv51.run();
        conv52.run();
        act5.run();
        pool5.run();
        // Layer 6
        fc6.run();
        act6.run();
        // Layer 7
        fc7.run();
        act7.run();
        // Layer 8
        fc8.run();
        // Softmax
        smx.run();
    }

private:
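    /** Initialise the tensor info of the weights and biases in their native
     *  (non-reshaped) layout.
     */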
    void init_weights()
    {
        w[0].allocator()->init(TensorInfo(TensorShape(11U, 11U, 3U, 96U), 1, _data_type, _fixed_point_position));
        b[0].allocator()->init(TensorInfo(TensorShape(96U), 1, _data_type, _fixed_point_position));
        w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 48U, 256U), 1, _data_type, _fixed_point_position));
        b[1].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
        w[2].allocator()->init(TensorInfo(TensorShape(3U, 3U, 256U, 384U), 1, _data_type, _fixed_point_position));
        b[2].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
        w[3].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 384U), 1, _data_type, _fixed_point_position));
        b[3].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
        w[4].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 256U), 1, _data_type, _fixed_point_position));
        b[4].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
        w[5].allocator()->init(TensorInfo(TensorShape(9216U, 4096U), 1, _data_type, _fixed_point_position));
        b[5].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
        w[6].allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, _data_type, _fixed_point_position));
        b[6].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
        w[7].allocator()->init(TensorInfo(TensorShape(4096U, 1000U), 1, _data_type, _fixed_point_position));
        b[7].allocator()->init(TensorInfo(TensorShape(1000U), 1, _data_type, _fixed_point_position));

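        // The weights and biases of the grouped layers are sub-tensor views
        // into the full parameter tensors, split along the last dimension.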
        w21 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates()));
        w22 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates(0, 0, 0, 128)));
        b21 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates()));
        b22 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates(128)));

        w41 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates()));
        w42 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates(0, 0, 0, 192)));
        b41 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates()));
        b42 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates(192)));

        w51 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates()));
        w52 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates(0, 0, 0, 128)));
        b51 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates()));
        b52 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates(128)));
    }

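    /** Initialise the tensor info of the weights and biases in the reshaped
     *  layout consumed by the layer functions when configured with
     *  pre-reshaped weights.
     */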
    void init_reshaped_weights()
    {
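        // Number of elements that fit in 16 bytes; the reshaped weight shapes
        // below are scaled by this interleaving factor.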
        const unsigned int data_type_size = 16 / arm_compute::data_size_from_type(_data_type);

        // Create tensors for the reshaped weights
        auto w21_tensor = std::unique_ptr<TensorType>(new TensorType());
        auto w22_tensor = std::unique_ptr<TensorType>(new TensorType());
        auto w41_tensor = std::unique_ptr<TensorType>(new TensorType());
        auto w42_tensor = std::unique_ptr<TensorType>(new TensorType());
        auto w51_tensor = std::unique_ptr<TensorType>(new TensorType());
        auto w52_tensor = std::unique_ptr<TensorType>(new TensorType());

        w[0].allocator()->init(TensorInfo(TensorShape(366U * data_type_size, 96U / data_type_size), 1, _data_type, _fixed_point_position));
        w21_tensor->allocator()->init(TensorInfo(TensorShape(1248U * data_type_size, 128U / data_type_size), 1, _data_type, _fixed_point_position));
        w22_tensor->allocator()->init(TensorInfo(TensorShape(1248U * data_type_size, 128U / data_type_size), 1, _data_type, _fixed_point_position));
        w[2].allocator()->init(TensorInfo(TensorShape(2560U * data_type_size, 384U / data_type_size), 1, _data_type, _fixed_point_position));
        w41_tensor->allocator()->init(TensorInfo(TensorShape(1920U * data_type_size, 192U / data_type_size), 1, _data_type, _fixed_point_position));
        w42_tensor->allocator()->init(TensorInfo(TensorShape(1920U * data_type_size, 192U / data_type_size), 1, _data_type, _fixed_point_position));
        w51_tensor->allocator()->init(TensorInfo(TensorShape(1920U * data_type_size, 128U / data_type_size), 1, _data_type, _fixed_point_position));
        w52_tensor->allocator()->init(TensorInfo(TensorShape(1920U * data_type_size, 128U / data_type_size), 1, _data_type, _fixed_point_position));

        w21 = std::move(w21_tensor);
        w22 = std::move(w22_tensor);
        w41 = std::move(w41_tensor);
        w42 = std::move(w42_tensor);
        w51 = std::move(w51_tensor);
        w52 = std::move(w52_tensor);

        b[5].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
        b[6].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
        b[7].allocator()->init(TensorInfo(TensorShape(1000U), 1, _data_type, _fixed_point_position));

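        // For batched runs the fully connected weights are transposed and
        // interleaved; for a single batch they are only transposed.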
        if(_batches > 1)
        {
            w[5].allocator()->init(TensorInfo(TensorShape(9216U * data_type_size, 4096U / data_type_size), 1, _data_type, _fixed_point_position));
            w[6].allocator()->init(TensorInfo(TensorShape(4096U * data_type_size, 4096U / data_type_size), 1, _data_type, _fixed_point_position));
            w[7].allocator()->init(TensorInfo(TensorShape(4096U * data_type_size, 1000U / data_type_size), 1, _data_type, _fixed_point_position));
        }
        else
        {
            w[5].allocator()->init(TensorInfo(TensorShape(4096U, 9216U), 1, _data_type, _fixed_point_position));
            w[6].allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, _data_type, _fixed_point_position));
            w[7].allocator()->init(TensorInfo(TensorShape(1000U, 4096U), 1, _data_type, _fixed_point_position));
        }
    }

    DataType     _data_type{ DataType::UNKNOWN };
    int          _fixed_point_position{ 0 };
    unsigned int _batches{ 0 };
    bool         _reshaped_weights{ false };

    ActivationLayerFunction     act1{}, act2{}, act3{}, act4{}, act5{}, act6{}, act7{};
    ConvolutionLayerFunction    conv1{}, conv21{}, conv22{}, conv3{}, conv41{}, conv42{}, conv51{}, conv52{};
    FullyConnectedLayerFunction fc6{}, fc7{}, fc8{};
    NormalizationLayerFunction  norm1{}, norm2{};
    PoolingLayerFunction        pool1{}, pool2{}, pool5{};
    SoftmaxLayerFunction        smx{};

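    // Input, output and trainable parameters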
    TensorType input{}, output{};
    std::array<TensorType, 8> w{ {} }, b{ {} };
    std::unique_ptr<ITensorType> w21{ nullptr }, w22{ nullptr }, b21{ nullptr }, b22{ nullptr };
    std::unique_ptr<ITensorType> w41{ nullptr }, w42{ nullptr }, b41{ nullptr }, b42{ nullptr };
    std::unique_ptr<ITensorType> w51{ nullptr }, w52{ nullptr }, b51{ nullptr }, b52{ nullptr };

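    // Intermediate tensors and the sub-tensor views used by the grouped layers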
    TensorType conv1_out{}, act1_out{}, norm1_out{}, pool1_out{};
    TensorType conv2_out{}, act2_out{}, pool2_out{}, norm2_out{};
    TensorType conv3_out{}, act3_out{};
    TensorType conv4_out{}, act4_out{};
    TensorType conv5_out{}, act5_out{}, pool5_out{};
    TensorType fc6_out{}, act6_out{};
    TensorType fc7_out{}, act7_out{};
    TensorType fc8_out{};

    std::unique_ptr<SubTensorType> pool11_out{}, pool12_out{};
    std::unique_ptr<SubTensorType> conv21_out{}, conv22_out{};
    std::unique_ptr<SubTensorType> act31_out{}, act32_out{};
    std::unique_ptr<SubTensorType> conv41_out{}, conv42_out{}, act41_out{}, act42_out{};
    std::unique_ptr<SubTensorType> conv51_out{}, conv52_out{};
};
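
// A minimal usage sketch (the backend types shown are assumptions; any
// matching set of tensor, accessor and layer-function types can be used):
//
//   using AlexNet = networks::AlexNetNetwork<ITensor, Tensor, SubTensor, Accessor,
//                                            NEActivationLayer, NEConvolutionLayer,
//                                            NEFullyConnectedLayer, NENormalizationLayer,
//                                            NEPoolingLayer, NESoftmaxLayer>;
//
//   AlexNet net{};
//   net.init(DataType::F32, 0, 1); // F32, no fixed point, batch size 1
//   net.build();
//   net.allocate();
//   net.fill_random();
//   net.run();
//   const std::vector<unsigned int> labels = net.get_classifications();
//   net.clear();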
} // namespace networks
} // namespace test
} // namespace arm_compute
#endif //__ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__