/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__
#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__

#include "AssetsLibrary.h"
#include "Globals.h"
#include "Utils.h"

#include <memory>

namespace arm_compute
{
namespace test
{
namespace networks
{
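/* Usage sketch (illustrative only): instantiate the template with one backend's
 * tensor and function types, then drive the network through its lifecycle. The
 * NEON type names below are assumptions for the example, not requirements of
 * this header:
 *
 *     using AlexNetModel = networks::AlexNetNetwork<ITensor, Tensor, SubTensor, Accessor,
 *                                                   NEActivationLayer, NEConvolutionLayer,
 *                                                   NEDirectConvolutionLayer, NEFullyConnectedLayer,
 *                                                   NENormalizationLayer, NEPoolingLayer,
 *                                                   NESoftmaxLayer>;
 *
 *     AlexNetModel network{};
 *     network.init(DataType::F32, 0, 1); // F32, no fixed point, batch size 1
 *     network.build();
 *     network.allocate();
 *     network.fill_random();
 *     network.run();
 *     const std::vector<unsigned int> labels = network.get_classifications();
 *     network.clear();
 */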
/** AlexNet model object */
template <typename ITensorType,
          typename TensorType,
          typename SubTensorType,
          typename Accessor,
          typename ActivationLayerFunction,
          typename ConvolutionLayerFunction,
          typename DirectConvolutionLayerFunction,
          typename FullyConnectedLayerFunction,
          typename NormalizationLayerFunction,
          typename PoolingLayerFunction,
          typename SoftmaxLayerFunction>
class AlexNetNetwork
{
public:
    void init(DataType data_type, int fixed_point_position, int batches, bool reshaped_weights = false)
    {
        _data_type            = data_type;
        _fixed_point_position = fixed_point_position;
        _batches              = batches;
        _reshaped_weights     = reshaped_weights;

        // Initialize weights and biases
        if(!_reshaped_weights)
        {
            w[0].allocator()->init(TensorInfo(TensorShape(11U, 11U, 3U, 96U), 1, _data_type, _fixed_point_position));
            b[0].allocator()->init(TensorInfo(TensorShape(96U), 1, _data_type, _fixed_point_position));
            w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 48U, 256U), 1, _data_type, _fixed_point_position));
            b[1].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
            w[2].allocator()->init(TensorInfo(TensorShape(3U, 3U, 256U, 384U), 1, _data_type, _fixed_point_position));
            b[2].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
            w[3].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 384U), 1, _data_type, _fixed_point_position));
            b[3].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
            w[4].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 256U), 1, _data_type, _fixed_point_position));
            b[4].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
            w[5].allocator()->init(TensorInfo(TensorShape(9216U, 4096U), 1, _data_type, _fixed_point_position));
            b[5].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
            w[6].allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, _data_type, _fixed_point_position));
            b[6].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
            w[7].allocator()->init(TensorInfo(TensorShape(4096U, 1000U), 1, _data_type, _fixed_point_position));
            b[7].allocator()->init(TensorInfo(TensorShape(1000U), 1, _data_type, _fixed_point_position));

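            // Layers 2, 4 and 5 are grouped convolutions (a relic of AlexNet's original
            // two-GPU training split), so each weight/bias tensor is viewed through two
            // sub-tensors, one per half of the output feature maps.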
            w21 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates()));
            w22 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates(0, 0, 0, 128)));
            b21 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates()));
            b22 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates(128)));

            w41 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates()));
            w42 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates(0, 0, 0, 192)));
            b41 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates()));
            b42 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates(192)));

            w51 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates()));
            w52 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates(0, 0, 0, 128)));
            b51 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates()));
            b52 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates(128)));
        }
        else
        {
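            // Elements per 128-bit block (e.g. 4 for F32): the GEMM path stores weights
            // transposed and interleaved in blocks of this many elements, hence the
            // widths below are multiplied and the heights divided by it.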
            const unsigned int data_type_size = 16 / arm_compute::data_size_from_type(_data_type);

            // Create tensor for the reshaped weights
            w[0].allocator()->init(TensorInfo(TensorShape(366U * data_type_size, 96U / data_type_size), 1, _data_type, _fixed_point_position));

            // Configure the weights of the grouped layers; direct convolution does not use reshaped weights
            if(!_is_direct_conv)
            {
                auto w21_tensor = std::unique_ptr<TensorType>(new TensorType());
                auto w22_tensor = std::unique_ptr<TensorType>(new TensorType());
                auto w41_tensor = std::unique_ptr<TensorType>(new TensorType());
                auto w42_tensor = std::unique_ptr<TensorType>(new TensorType());
                auto w51_tensor = std::unique_ptr<TensorType>(new TensorType());
                auto w52_tensor = std::unique_ptr<TensorType>(new TensorType());
                w21_tensor->allocator()->init(TensorInfo(TensorShape(1248U * data_type_size, 128U / data_type_size), 1, _data_type, _fixed_point_position));
                w22_tensor->allocator()->init(TensorInfo(TensorShape(1248U * data_type_size, 128U / data_type_size), 1, _data_type, _fixed_point_position));
                w41_tensor->allocator()->init(TensorInfo(TensorShape(1920U * data_type_size, 192U / data_type_size), 1, _data_type, _fixed_point_position));
                w42_tensor->allocator()->init(TensorInfo(TensorShape(1920U * data_type_size, 192U / data_type_size), 1, _data_type, _fixed_point_position));
                w51_tensor->allocator()->init(TensorInfo(TensorShape(1920U * data_type_size, 128U / data_type_size), 1, _data_type, _fixed_point_position));
                w52_tensor->allocator()->init(TensorInfo(TensorShape(1920U * data_type_size, 128U / data_type_size), 1, _data_type, _fixed_point_position));
                w[2].allocator()->init(TensorInfo(TensorShape(2560U * data_type_size, 384U / data_type_size), 1, _data_type, _fixed_point_position));
                w21 = std::move(w21_tensor);
                w22 = std::move(w22_tensor);
                w41 = std::move(w41_tensor);
                w42 = std::move(w42_tensor);
                w51 = std::move(w51_tensor);
                w52 = std::move(w52_tensor);
            }
            else
            {
                w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 48U, 256U), 1, _data_type, _fixed_point_position));
                b[1].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
                w[2].allocator()->init(TensorInfo(TensorShape(3U, 3U, 256U, 384U), 1, _data_type, _fixed_point_position));
                b[2].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
                w[3].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 384U), 1, _data_type, _fixed_point_position));
                b[3].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
                w[4].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 256U), 1, _data_type, _fixed_point_position));
                b[4].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
                w21 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates()));
                w22 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates(0, 0, 0, 128)));
                b21 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates()));
                b22 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates(128)));

                w41 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates()));
                w42 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates(0, 0, 0, 192)));
                b41 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates()));
                b42 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates(192)));

                w51 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates()));
                w52 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates(0, 0, 0, 128)));
                b51 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates()));
                b52 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates(128)));
            }

            b[5].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
            b[6].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
            b[7].allocator()->init(TensorInfo(TensorShape(1000U), 1, _data_type, _fixed_point_position));

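            // Fully connected weights: with multiple batches the layer runs as a GEMM on
            // interleaved, transposed weights; with a single batch it reduces to a
            // matrix-vector product, which only needs a plain transpose.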
            if(_batches > 1)
            {
                w[5].allocator()->init(TensorInfo(TensorShape(9216U * data_type_size, 4096U / data_type_size), 1, _data_type, _fixed_point_position));
                w[6].allocator()->init(TensorInfo(TensorShape(4096U * data_type_size, 4096U / data_type_size), 1, _data_type, _fixed_point_position));
                w[7].allocator()->init(TensorInfo(TensorShape(4096U * data_type_size, 1000U / data_type_size), 1, _data_type, _fixed_point_position));
            }
            else
            {
                w[5].allocator()->init(TensorInfo(TensorShape(4096U, 9216U), 1, _data_type, _fixed_point_position));
                w[6].allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, _data_type, _fixed_point_position));
                w[7].allocator()->init(TensorInfo(TensorShape(1000U, 4096U), 1, _data_type, _fixed_point_position));
            }
        }
    }

    void build()
    {
        input.allocator()->init(TensorInfo(TensorShape(227U, 227U, 3U, _batches), 1, _data_type, _fixed_point_position));
        output.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, _data_type, _fixed_point_position));

        // Initialize intermediate tensors
        // Layer 1
        conv1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type, _fixed_point_position));
        act1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type, _fixed_point_position));
        norm1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type, _fixed_point_position));
        pool1_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 96U, _batches), 1, _data_type, _fixed_point_position));
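        // Split pool1's output along the channel axis so each half feeds one grouped
        // branch of layer 2.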
        pool11_out = std::unique_ptr<SubTensorType>(new SubTensorType(&pool1_out, TensorShape(27U, 27U, 48U, _batches), Coordinates()));
        pool12_out = std::unique_ptr<SubTensorType>(new SubTensorType(&pool1_out, TensorShape(27U, 27U, 48U, _batches), Coordinates(0, 0, 48)));
        // Layer 2
        conv2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type, _fixed_point_position));
        conv21_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv2_out, TensorShape(27U, 27U, 128U, _batches), Coordinates()));
        conv22_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv2_out, TensorShape(27U, 27U, 128U, _batches), Coordinates(0, 0, 128)));
        act2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type, _fixed_point_position));
        norm2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type, _fixed_point_position));
        pool2_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type, _fixed_point_position));
        // Layer 3
        conv3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
        act3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
        act31_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act3_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
        act32_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act3_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
        // Layer 4
        conv4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
        conv41_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
        conv42_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
        act4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
        act41_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
        act42_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
        // Layer 5
        conv5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type, _fixed_point_position));
        conv51_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv5_out, TensorShape(13U, 13U, 128U, _batches), Coordinates()));
        conv52_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv5_out, TensorShape(13U, 13U, 128U, _batches), Coordinates(0, 0, 128)));
        act5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type, _fixed_point_position));
        pool5_out.allocator()->init(TensorInfo(TensorShape(6U, 6U, 256U, _batches), 1, _data_type, _fixed_point_position));
        // Layer 6
        fc6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
        act6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
        // Layer 7
        fc7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
        act7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
        // Layer 8
        fc8_out.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, _data_type, _fixed_point_position));

        // Configure Layers
        // Layer 1
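        // When the weights have been reshaped for the GEMM path, the bias is expected
        // to be folded into the reshaped weight matrix, so a null bias is passed here
        // (and for conv3 below).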
        TensorType *b0 = _reshaped_weights ? nullptr : &b[0];
        conv1.configure(&input, &w[0], b0, &conv1_out, PadStrideInfo(4, 4, 0, 0), WeightsInfo(_reshaped_weights, 11U, 11U, 96U));
        act1.configure(&conv1_out, &act1_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        norm1.configure(&act1_out, &norm1_out, NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f));
        pool1.configure(&norm1_out, &pool1_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
        // Layer 2
        conv21.configure(pool11_out.get(), w21.get(), b21.get(), conv21_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U, 5U, 128U));
        conv22.configure(pool12_out.get(), w22.get(), b22.get(), conv22_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U, 5U, 128U));
        act2.configure(&conv2_out, &act2_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        norm2.configure(&act2_out, &norm2_out, NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f));
        pool2.configure(&norm2_out, &pool2_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
        // Layer 3
        TensorType *b2 = (_reshaped_weights && !_is_direct_conv) ? nullptr : &b[2];
        conv3.configure(&pool2_out, &w[2], b2, &conv3_out, PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 384U));
        act3.configure(&conv3_out, &act3_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        // Layer 4
        conv41.configure(act31_out.get(), w41.get(), b41.get(), conv41_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 192U));
        conv42.configure(act32_out.get(), w42.get(), b42.get(), conv42_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 192U));
        act4.configure(&conv4_out, &act4_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        // Layer 5
        conv51.configure(act41_out.get(), w51.get(), b51.get(), conv51_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 128U));
        conv52.configure(act42_out.get(), w52.get(), b52.get(), conv52_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 128U));
        act5.configure(&conv5_out, &act5_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        pool5.configure(&act5_out, &pool5_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
        // Layer 6
        fc6.configure(&pool5_out, &w[5], &b[5], &fc6_out, true, _reshaped_weights);
        act6.configure(&fc6_out, &act6_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        // Layer 7
        fc7.configure(&act6_out, &w[6], &b[6], &fc7_out, true, _reshaped_weights);
        act7.configure(&fc7_out, &act7_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        // Layer 8
        fc8.configure(&act7_out, &w[7], &b[7], &fc8_out, true, _reshaped_weights);
        // Softmax
        smx.configure(&fc8_out, &output);
    }

    void allocate()
    {
        input.allocator()->allocate();
        output.allocator()->allocate();

        if(!_reshaped_weights)
        {
            for(auto &wi : w)
            {
                wi.allocator()->allocate();
            }

            for(auto &bi : b)
            {
                bi.allocator()->allocate();
            }
        }
        else
        {
            w[0].allocator()->allocate();
            w[2].allocator()->allocate();
            w[5].allocator()->allocate();
            w[6].allocator()->allocate();
            w[7].allocator()->allocate();

            b[5].allocator()->allocate();
            b[6].allocator()->allocate();
            b[7].allocator()->allocate();

            if(!_is_direct_conv)
            {
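                // w21..w52 are held as ITensorType pointers but were created as concrete
                // TensorType objects in this branch, so downcast to reach the allocator.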
                dynamic_cast<TensorType *>(w21.get())->allocator()->allocate();
                dynamic_cast<TensorType *>(w22.get())->allocator()->allocate();
                dynamic_cast<TensorType *>(w41.get())->allocator()->allocate();
                dynamic_cast<TensorType *>(w42.get())->allocator()->allocate();
                dynamic_cast<TensorType *>(w51.get())->allocator()->allocate();
                dynamic_cast<TensorType *>(w52.get())->allocator()->allocate();
            }
            else
            {
                b[1].allocator()->allocate();
                b[2].allocator()->allocate();
                b[3].allocator()->allocate();
                b[4].allocator()->allocate();
                w[1].allocator()->allocate();
                w[3].allocator()->allocate();
                w[4].allocator()->allocate();
            }
        }

        conv1_out.allocator()->allocate();
        act1_out.allocator()->allocate();
        norm1_out.allocator()->allocate();
        pool1_out.allocator()->allocate();
        conv2_out.allocator()->allocate();
        act2_out.allocator()->allocate();
        norm2_out.allocator()->allocate();
        pool2_out.allocator()->allocate();
        conv3_out.allocator()->allocate();
        act3_out.allocator()->allocate();
        conv4_out.allocator()->allocate();
        act4_out.allocator()->allocate();
        conv5_out.allocator()->allocate();
        act5_out.allocator()->allocate();
        pool5_out.allocator()->allocate();
        fc6_out.allocator()->allocate();
        act6_out.allocator()->allocate();
        fc7_out.allocator()->allocate();
        act7_out.allocator()->allocate();
        fc8_out.allocator()->allocate();
    }

    /** Fills the trainable parameters and input with random data. */
    void fill_random()
    {
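        // The integer arguments are seed offsets for the assets library's PRNG, so each
        // tensor is filled with different, reproducible random data.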
        library->fill_tensor_uniform(Accessor(input), 0);

        if(!_reshaped_weights)
        {
            for(unsigned int i = 0; i < w.size(); ++i)
            {
                library->fill_tensor_uniform(Accessor(w[i]), i + 1);
                library->fill_tensor_uniform(Accessor(b[i]), i + 10);
            }
        }
        else
        {
            library->fill_tensor_uniform(Accessor(w[0]), 1);
            library->fill_tensor_uniform(Accessor(w[2]), 2);

            library->fill_tensor_uniform(Accessor(w[5]), 3);
            library->fill_tensor_uniform(Accessor(b[5]), 4);
            library->fill_tensor_uniform(Accessor(w[6]), 5);
            library->fill_tensor_uniform(Accessor(b[6]), 6);
            library->fill_tensor_uniform(Accessor(w[7]), 7);
            library->fill_tensor_uniform(Accessor(b[7]), 8);

            if(!_is_direct_conv)
            {
                library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w21.get())), 9);
                library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w22.get())), 10);
                library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w41.get())), 11);
                library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w42.get())), 12);
                library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w51.get())), 13);
                library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w52.get())), 14);
            }
            else
            {
                library->fill_tensor_uniform(Accessor(w[1]), 9);
                library->fill_tensor_uniform(Accessor(b[1]), 10);
                library->fill_tensor_uniform(Accessor(w[3]), 11);
                library->fill_tensor_uniform(Accessor(b[3]), 12);
                library->fill_tensor_uniform(Accessor(w[4]), 13);
                library->fill_tensor_uniform(Accessor(b[4]), 14);
            }
        }
    }

#ifdef INTERNAL_ONLY
    /** Fills the trainable parameters from binary files
     *
     * @param weights File names containing the weights data
     * @param biases  File names containing the bias data
     */
    void fill(std::vector<std::string> weights, std::vector<std::string> biases)
    {
        ARM_COMPUTE_ERROR_ON(weights.size() != w.size());
        ARM_COMPUTE_ERROR_ON(biases.size() != b.size());
        ARM_COMPUTE_ERROR_ON(_reshaped_weights);

        for(unsigned int i = 0; i < weights.size(); ++i)
        {
            library->fill_layer_data(Accessor(w[i]), weights[i]);
            library->fill_layer_data(Accessor(b[i]), biases[i]);
        }
    }

    /** Feed input to network from file.
     *
     * @param name Name of the file containing the input data.
     */
    void feed(std::string name)
    {
        library->fill_layer_data(Accessor(input), name);
    }
#endif /* INTERNAL_ONLY */

    /** Get the classification results.
     *
     * @return Vector containing the classified labels
     */
    std::vector<unsigned int> get_classifications()
    {
        std::vector<unsigned int> classified_labels;
        Accessor output_accessor(output);

        Window window;
        window.set(Window::DimX, Window::Dimension(0, 1, 1));
        for(unsigned int d = 1; d < output_accessor.shape().num_dimensions(); ++d)
        {
            window.set(d, Window::Dimension(0, output_accessor.shape()[d], 1));
        }

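        // The X dimension is collapsed to a single step, so each loop iteration visits
        // one batch entry and takes the argmax over its class logits. Note the values
        // are read as float, which assumes a float output tensor.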
        execute_window_loop(window, [&](const Coordinates & id)
        {
            unsigned int max_idx = 0;
            float        val     = 0;
            const void *const out_ptr = output_accessor(id);
            for(unsigned int l = 0; l < output_accessor.shape().x(); ++l)
            {
                float curr_val = reinterpret_cast<const float *>(out_ptr)[l];
                if(curr_val > val)
                {
                    max_idx = l;
                    val     = curr_val;
                }
            }
            classified_labels.push_back(max_idx);
        });
        return classified_labels;
    }

    /** Clear all allocated memory from the tensor objects */
    void clear()
    {
        // Free allocations
        input.allocator()->free();
        output.allocator()->free();

        if(!_reshaped_weights)
        {
            for(auto &wi : w)
            {
                wi.allocator()->free();
            }

            for(auto &bi : b)
            {
                bi.allocator()->free();
            }
        }
458 {
459 w[0].allocator()->free();
460 w[2].allocator()->free();
461 w[5].allocator()->free();
462 w[6].allocator()->free();
463 w[7].allocator()->free();
464
465 b[5].allocator()->free();
466 b[6].allocator()->free();
467 b[7].allocator()->free();
steniu01a629da12017-07-28 14:40:58 +0100468
469 if(_is_direct_conv)
470 {
471 w[3].allocator()->free();
472 w[4].allocator()->free();
473 b[2].allocator()->free();
474 b[3].allocator()->free();
475 b[4].allocator()->free();
476 }
Moritz Pflanzeree493ae2017-07-05 10:52:21 +0100477 }

        w21.reset();
        w22.reset();
        b21.reset();
        b22.reset();
        w41.reset();
        w42.reset();
        b41.reset();
        b42.reset();
        w51.reset();
        w52.reset();
        b51.reset();
        b52.reset();

        conv1_out.allocator()->free();
        act1_out.allocator()->free();
        norm1_out.allocator()->free();
        pool1_out.allocator()->free();
        conv2_out.allocator()->free();
        act2_out.allocator()->free();
        norm2_out.allocator()->free();
        pool2_out.allocator()->free();
        conv3_out.allocator()->free();
        act3_out.allocator()->free();
        conv4_out.allocator()->free();
        act4_out.allocator()->free();
        conv5_out.allocator()->free();
        act5_out.allocator()->free();
        pool5_out.allocator()->free();
        fc6_out.allocator()->free();
        act6_out.allocator()->free();
        fc7_out.allocator()->free();
        act7_out.allocator()->free();
        fc8_out.allocator()->free();
    }

    /** Runs the model */
    void run()
    {
        // Layer 1
        conv1.run();
        act1.run();
        norm1.run();
        pool1.run();
        // Layer 2
        conv21.run();
        conv22.run();
        act2.run();
        norm2.run();
        pool2.run();
        // Layer 3
        conv3.run();
        act3.run();
        // Layer 4
        conv41.run();
        conv42.run();
        act4.run();
        // Layer 5
        conv51.run();
        conv52.run();
        act5.run();
        pool5.run();
        // Layer 6
        fc6.run();
        act6.run();
        // Layer 7
        fc7.run();
        act7.run();
        // Layer 8
        fc8.run();
        // Softmax
        smx.run();
    }

private:
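    // Compile-time dispatch helper: if ConvolutionLayerFunction and
    // DirectConvolutionLayerFunction are distinct types, _func is a true direct
    // convolution whose configure() takes no WeightsInfo; if they are the same type,
    // _func is the generic (GEMM-based) convolution and the WeightsInfo is forwarded.
    // Exactly one of the two enable_if overloads below is instantiated.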
    struct DirectConv
    {
        template <typename ConvolutionLayerFunction1 = ConvolutionLayerFunction, typename DirectConvolutionLayerFunction1 = DirectConvolutionLayerFunction>
        typename std::enable_if < !std::is_same<ConvolutionLayerFunction1, DirectConvolutionLayerFunction1>::value, void >::type
        configure(ITensorType *input, const ITensorType *weights, const ITensorType *biases, ITensorType *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo())
        {
            _func.configure(input, weights, biases, output, conv_info);
        }

        template <typename ConvolutionLayerFunction1 = ConvolutionLayerFunction, typename DirectConvolutionLayerFunction1 = DirectConvolutionLayerFunction>
        typename std::enable_if<std::is_same<ConvolutionLayerFunction1, DirectConvolutionLayerFunction1>::value, void>::type
        configure(ITensorType *input, const ITensorType *weights, const ITensorType *biases, ITensorType *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo())
        {
            _func.configure(input, weights, biases, output, conv_info, weights_info);
        }

        void run()
        {
            _func.run();
        }

        DirectConvolutionLayerFunction _func{};
    };

    DataType     _data_type{ DataType::UNKNOWN };
    int          _fixed_point_position{ 0 };
    unsigned int _batches{ 0 };
    bool         _reshaped_weights{ false };
    bool         _is_direct_conv{ !std::is_same<ConvolutionLayerFunction, DirectConvolutionLayerFunction>::value };

    ActivationLayerFunction     act1{}, act2{}, act3{}, act4{}, act5{}, act6{}, act7{};
    ConvolutionLayerFunction    conv1{};
    DirectConv                  conv21{}, conv22{}, conv3{}, conv41{}, conv42{}, conv51{}, conv52{};
    FullyConnectedLayerFunction fc6{}, fc7{}, fc8{};
    NormalizationLayerFunction  norm1{}, norm2{};
    PoolingLayerFunction        pool1{}, pool2{}, pool5{};
    SoftmaxLayerFunction        smx{};

    TensorType input{}, output{};
    std::array<TensorType, 8> w{ {} }, b{ {} };
    std::unique_ptr<ITensorType> w21{ nullptr }, w22{ nullptr }, b21{ nullptr }, b22{ nullptr };
    std::unique_ptr<ITensorType> w41{ nullptr }, w42{ nullptr }, b41{ nullptr }, b42{ nullptr };
    std::unique_ptr<ITensorType> w51{ nullptr }, w52{ nullptr }, b51{ nullptr }, b52{ nullptr };

    TensorType conv1_out{}, act1_out{}, norm1_out{}, pool1_out{};
    TensorType conv2_out{}, act2_out{}, pool2_out{}, norm2_out{};
    TensorType conv3_out{}, act3_out{};
    TensorType conv4_out{}, act4_out{};
    TensorType conv5_out{}, act5_out{}, pool5_out{};
    TensorType fc6_out{}, act6_out{};
    TensorType fc7_out{}, act7_out{};
    TensorType fc8_out{};

    std::unique_ptr<SubTensorType> pool11_out{}, pool12_out{};
    std::unique_ptr<SubTensorType> conv21_out{}, conv22_out{};
    std::unique_ptr<SubTensorType> act31_out{}, act32_out{};
    std::unique_ptr<SubTensorType> conv41_out{}, conv42_out{}, act41_out{}, act42_out{};
    std::unique_ptr<SubTensorType> conv51_out{}, conv52_out{};
};
} // namespace networks
} // namespace test
} // namespace arm_compute
#endif //__ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__