/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__
#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__

#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/Tensor.h"

#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/Utils.h"

#include <memory>

namespace arm_compute
{
namespace test
{
namespace networks
{
/** AlexNet model object */
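/* Typical usage (an illustrative sketch, not taken from the library's docs; the
 * template arguments are the backend's tensor and function types, e.g. the NEON
 * or CL runtime classes):
 *
 *   AlexNetNetwork<...> network{};
 *   network.init(DataType::F32, 0, 4); // F32, no fixed point, 4 batches
 *   network.build();
 *   network.allocate();
 *   network.fill_random();             // or fill()/feed() with real asset files
 *   network.run();
 *   network.sync();
 *   const std::vector<unsigned int> labels = network.get_classifications();
 *   network.clear();
 */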
template <typename ITensorType,
          typename TensorType,
          typename SubTensorType,
          typename Accessor,
          typename ActivationLayerFunction,
          typename ConvolutionLayerFunction,
          typename DirectConvolutionLayerFunction,
          typename FullyConnectedLayerFunction,
          typename NormalizationLayerFunction,
          typename PoolingLayerFunction,
          typename SoftmaxLayerFunction>
class AlexNetNetwork
{
public:
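    /** Initialize the network's trainable parameters.
     *
     * @param data_type            Data type of the tensors.
     * @param fixed_point_position Fixed point position for fixed point data types.
     * @param batches              Number of batches the network is run with.
     * @param reshaped_weights     True if the weight tensors are initialized in their reshaped form.
     */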
    void init(DataType data_type, int fixed_point_position, int batches, bool reshaped_weights = false)
    {
        _data_type            = data_type;
        _fixed_point_position = fixed_point_position;
        _batches              = batches;
        _reshaped_weights     = reshaped_weights;

        // Initialize weights and biases
        if(!_reshaped_weights)
        {
            w[0].allocator()->init(TensorInfo(TensorShape(11U, 11U, 3U, 96U), 1, _data_type, _fixed_point_position));
            b[0].allocator()->init(TensorInfo(TensorShape(96U), 1, _data_type, _fixed_point_position));
            w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 48U, 256U), 1, _data_type, _fixed_point_position));
            b[1].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
            w[2].allocator()->init(TensorInfo(TensorShape(3U, 3U, 256U, 384U), 1, _data_type, _fixed_point_position));
            b[2].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
            w[3].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 384U), 1, _data_type, _fixed_point_position));
            b[3].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
            w[4].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 256U), 1, _data_type, _fixed_point_position));
            b[4].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
            w[5].allocator()->init(TensorInfo(TensorShape(9216U, 4096U), 1, _data_type, _fixed_point_position));
            b[5].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
            w[6].allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, _data_type, _fixed_point_position));
            b[6].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
            w[7].allocator()->init(TensorInfo(TensorShape(4096U, 1000U), 1, _data_type, _fixed_point_position));
            b[7].allocator()->init(TensorInfo(TensorShape(1000U), 1, _data_type, _fixed_point_position));

            w11 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates()));
            w12 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates(0, 0, 0, 128)));
            b11 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates()));
            b12 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates(128)));

            w31 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates()));
            w32 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates(0, 0, 0, 192)));
            b31 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates()));
            b32 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates(192)));

            w41 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates()));
            w42 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates(0, 0, 0, 128)));
            b41 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates()));
            b42 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates(128)));
        }
        else
        {
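            // reshape() computes the shape of a reshaped weights matrix: the optimised
            // GEMM path only needs the matrix transposed, while every other path stores
            // it interleaved in blocks of (16 / element size) values along the width.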
            auto reshape = [&](unsigned int width, unsigned int height, bool convolution_layer) -> TensorShape
            {
                const bool is_optimised = std::is_same<ITensorType, ITensor>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV7 && data_type == DataType::F32;

                if(convolution_layer && is_optimised)
                {
                    return TensorShape{ height, width };
                }
                else
                {
                    const int interleave_width = 16 / arm_compute::data_size_from_type(_data_type);

                    return TensorShape{ width * interleave_width, static_cast<unsigned int>(std::ceil(static_cast<float>(height) / interleave_width)) };
                }
            };

            // Create tensors for the reshaped weights
            w[0].allocator()->init(TensorInfo(reshape(366U, 96U, true), 1, _data_type, _fixed_point_position));

            // Direct convolution does not use reshaped weights, so the reshaped tensors are only needed for the GEMM-based path
            if(!_is_direct_conv)
            {
                auto w11_tensor = std::unique_ptr<TensorType>(new TensorType());
                auto w12_tensor = std::unique_ptr<TensorType>(new TensorType());
                auto w31_tensor = std::unique_ptr<TensorType>(new TensorType());
                auto w32_tensor = std::unique_ptr<TensorType>(new TensorType());
                auto w41_tensor = std::unique_ptr<TensorType>(new TensorType());
                auto w42_tensor = std::unique_ptr<TensorType>(new TensorType());
                w11_tensor->allocator()->init(TensorInfo(reshape(1248U, 128U, true), 1, _data_type, _fixed_point_position));
                w12_tensor->allocator()->init(TensorInfo(reshape(1248U, 128U, true), 1, _data_type, _fixed_point_position));
                w31_tensor->allocator()->init(TensorInfo(reshape(1920U, 192U, true), 1, _data_type, _fixed_point_position));
                w32_tensor->allocator()->init(TensorInfo(reshape(1920U, 192U, true), 1, _data_type, _fixed_point_position));
                w41_tensor->allocator()->init(TensorInfo(reshape(1920U, 128U, true), 1, _data_type, _fixed_point_position));
                w42_tensor->allocator()->init(TensorInfo(reshape(1920U, 128U, true), 1, _data_type, _fixed_point_position));
                w[2].allocator()->init(TensorInfo(reshape(2560U, 384U, true), 1, _data_type, _fixed_point_position));
                w11 = std::move(w11_tensor);
                w12 = std::move(w12_tensor);
                w31 = std::move(w31_tensor);
                w32 = std::move(w32_tensor);
                w41 = std::move(w41_tensor);
                w42 = std::move(w42_tensor);
            }
            else
            {
                w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 48U, 256U), 1, _data_type, _fixed_point_position));
                b[1].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
                w[2].allocator()->init(TensorInfo(TensorShape(3U, 3U, 256U, 384U), 1, _data_type, _fixed_point_position));
                b[2].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
                w[3].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 384U), 1, _data_type, _fixed_point_position));
                b[3].allocator()->init(TensorInfo(TensorShape(384U), 1, _data_type, _fixed_point_position));
                w[4].allocator()->init(TensorInfo(TensorShape(3U, 3U, 192U, 256U), 1, _data_type, _fixed_point_position));
                b[4].allocator()->init(TensorInfo(TensorShape(256U), 1, _data_type, _fixed_point_position));
                w11 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates()));
                w12 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[1], TensorShape(5U, 5U, 48U, 128U), Coordinates(0, 0, 0, 128)));
                b11 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates()));
                b12 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[1], TensorShape(128U), Coordinates(128)));

                w31 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates()));
                w32 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[3], TensorShape(3U, 3U, 192U, 192U), Coordinates(0, 0, 0, 192)));
                b31 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates()));
                b32 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[3], TensorShape(192U), Coordinates(192)));

                w41 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates()));
                w42 = std::unique_ptr<SubTensorType>(new SubTensorType(&w[4], TensorShape(3U, 3U, 192U, 128U), Coordinates(0, 0, 0, 128)));
                b41 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates()));
                b42 = std::unique_ptr<SubTensorType>(new SubTensorType(&b[4], TensorShape(128U), Coordinates(128)));
            }

            b[5].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
            b[6].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
            b[7].allocator()->init(TensorInfo(TensorShape(1000U), 1, _data_type, _fixed_point_position));

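            // Fully connected weights: with more than one batch on the NEON backend they
            // are initialized in the interleaved layout computed by reshape(); otherwise
            // they are kept in plain transposed form.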
            if(_batches > 1 && std::is_same<TensorType, Tensor>::value)
            {
                w[5].allocator()->init(TensorInfo(reshape(9216U, 4096U, false), 1, _data_type, _fixed_point_position));
                w[6].allocator()->init(TensorInfo(reshape(4096U, 4096U, false), 1, _data_type, _fixed_point_position));
                w[7].allocator()->init(TensorInfo(reshape(4096U, 1000U, false), 1, _data_type, _fixed_point_position));
            }
            else
            {
                w[5].allocator()->init(TensorInfo(TensorShape(4096U, 9216U), 1, _data_type, _fixed_point_position));
                w[6].allocator()->init(TensorInfo(TensorShape(4096U, 4096U), 1, _data_type, _fixed_point_position));
                w[7].allocator()->init(TensorInfo(TensorShape(1000U, 4096U), 1, _data_type, _fixed_point_position));
            }
        }
    }

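    /** Build the network: initialize the input, output and intermediate tensors and configure every layer function. */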
    void build()
    {
        input.allocator()->init(TensorInfo(TensorShape(227U, 227U, 3U, _batches), 1, _data_type, _fixed_point_position));
        output.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, _data_type, _fixed_point_position));

        // Initialize intermediate tensors
        // Layer 1
        conv1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type, _fixed_point_position));
        act1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type, _fixed_point_position));
        norm1_out.allocator()->init(TensorInfo(TensorShape(55U, 55U, 96U, _batches), 1, _data_type, _fixed_point_position));
        pool1_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 96U, _batches), 1, _data_type, _fixed_point_position));
        pool11_out = std::unique_ptr<SubTensorType>(new SubTensorType(&pool1_out, TensorShape(27U, 27U, 48U, _batches), Coordinates()));
        pool12_out = std::unique_ptr<SubTensorType>(new SubTensorType(&pool1_out, TensorShape(27U, 27U, 48U, _batches), Coordinates(0, 0, 48)));
        // Layer 2
        conv2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type, _fixed_point_position));
        conv21_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv2_out, TensorShape(27U, 27U, 128U, _batches), Coordinates()));
        conv22_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv2_out, TensorShape(27U, 27U, 128U, _batches), Coordinates(0, 0, 128)));
        act2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type, _fixed_point_position));
        norm2_out.allocator()->init(TensorInfo(TensorShape(27U, 27U, 256U, _batches), 1, _data_type, _fixed_point_position));
        pool2_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type, _fixed_point_position));
        // Layer 3
        conv3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
        act3_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
        act31_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act3_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
        act32_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act3_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
        // Layer 4
        conv4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
        conv41_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
        conv42_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
        act4_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 384U, _batches), 1, _data_type, _fixed_point_position));
        act41_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates()));
        act42_out = std::unique_ptr<SubTensorType>(new SubTensorType(&act4_out, TensorShape(13U, 13U, 192U, _batches), Coordinates(0, 0, 192)));
        // Layer 5
        conv5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type, _fixed_point_position));
        conv51_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv5_out, TensorShape(13U, 13U, 128U, _batches), Coordinates()));
        conv52_out = std::unique_ptr<SubTensorType>(new SubTensorType(&conv5_out, TensorShape(13U, 13U, 128U, _batches), Coordinates(0, 0, 128)));
        act5_out.allocator()->init(TensorInfo(TensorShape(13U, 13U, 256U, _batches), 1, _data_type, _fixed_point_position));
        pool5_out.allocator()->init(TensorInfo(TensorShape(6U, 6U, 256U, _batches), 1, _data_type, _fixed_point_position));
        // Layer 6
        fc6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
        act6_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
        // Layer 7
        fc7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
        act7_out.allocator()->init(TensorInfo(TensorShape(4096U, _batches), 1, _data_type, _fixed_point_position));
        // Layer 8
        fc8_out.allocator()->init(TensorInfo(TensorShape(1000U, _batches), 1, _data_type, _fixed_point_position));

        // Configure Layers
        // Layer 1
        TensorType *b0 = _reshaped_weights ? nullptr : &b[0];
        conv1.configure(&input, &w[0], b0, &conv1_out, PadStrideInfo(4, 4, 0, 0), WeightsInfo(_reshaped_weights, 11U, 11U, 96U));
        act1.configure(&conv1_out, &act1_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        norm1.configure(&act1_out, &norm1_out, NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f));
        pool1.configure(&norm1_out, &pool1_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
        // Layer 2
        conv21.configure(pool11_out.get(), w11.get(), b11.get(), conv21_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U, 5U, 128U));
        conv22.configure(pool12_out.get(), w12.get(), b12.get(), conv22_out.get(), PadStrideInfo(1, 1, 2, 2), WeightsInfo(_reshaped_weights, 5U, 5U, 128U));
        act2.configure(&conv2_out, &act2_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        norm2.configure(&act2_out, &norm2_out, NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f));
        pool2.configure(&norm2_out, &pool2_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
        // Layer 3
        TensorType *b2 = (_reshaped_weights && !_is_direct_conv) ? nullptr : &b[2];
        conv3.configure(&pool2_out, &w[2], b2, &conv3_out, PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 384U));
        act3.configure(&conv3_out, &act3_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        // Layer 4
        conv41.configure(act31_out.get(), w31.get(), b31.get(), conv41_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 192U));
        conv42.configure(act32_out.get(), w32.get(), b32.get(), conv42_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 192U));
        act4.configure(&conv4_out, &act4_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        // Layer 5
        conv51.configure(act41_out.get(), w41.get(), b41.get(), conv51_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 128U));
        conv52.configure(act42_out.get(), w42.get(), b42.get(), conv52_out.get(), PadStrideInfo(1, 1, 1, 1), WeightsInfo(_reshaped_weights, 3U, 3U, 128U));
        act5.configure(&conv5_out, &act5_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        pool5.configure(&act5_out, &pool5_out, PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)));
        // Layer 6
        fc6.configure(&pool5_out, &w[5], &b[5], &fc6_out, true, _reshaped_weights);
        act6.configure(&fc6_out, &act6_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        // Layer 7
        fc7.configure(&act6_out, &w[6], &b[6], &fc7_out, true, _reshaped_weights);
        act7.configure(&fc7_out, &act7_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        // Layer 8
        fc8.configure(&act7_out, &w[7], &b[7], &fc8_out, true, _reshaped_weights);
        // Softmax
        smx.configure(&fc8_out, &output);
    }

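    /** Allocate the backing memory of every tensor used by the network. */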
    void allocate()
    {
        input.allocator()->allocate();
        output.allocator()->allocate();

        if(!_reshaped_weights)
        {
            for(auto &wi : w)
            {
                wi.allocator()->allocate();
            }

            for(auto &bi : b)
            {
                bi.allocator()->allocate();
            }
        }
        else
        {
            w[0].allocator()->allocate();
            w[2].allocator()->allocate();
            w[5].allocator()->allocate();
            w[6].allocator()->allocate();
            w[7].allocator()->allocate();

            b[5].allocator()->allocate();
            b[6].allocator()->allocate();
            b[7].allocator()->allocate();

            if(!_is_direct_conv)
            {
                dynamic_cast<TensorType *>(w11.get())->allocator()->allocate();
                dynamic_cast<TensorType *>(w12.get())->allocator()->allocate();
                dynamic_cast<TensorType *>(w31.get())->allocator()->allocate();
                dynamic_cast<TensorType *>(w32.get())->allocator()->allocate();
                dynamic_cast<TensorType *>(w41.get())->allocator()->allocate();
                dynamic_cast<TensorType *>(w42.get())->allocator()->allocate();
            }
            else
            {
                b[1].allocator()->allocate();
                b[2].allocator()->allocate();
                b[3].allocator()->allocate();
                b[4].allocator()->allocate();
                w[1].allocator()->allocate();
                w[3].allocator()->allocate();
                w[4].allocator()->allocate();
            }
        }

        conv1_out.allocator()->allocate();
        act1_out.allocator()->allocate();
        norm1_out.allocator()->allocate();
        pool1_out.allocator()->allocate();
        conv2_out.allocator()->allocate();
        act2_out.allocator()->allocate();
        norm2_out.allocator()->allocate();
        pool2_out.allocator()->allocate();
        conv3_out.allocator()->allocate();
        act3_out.allocator()->allocate();
        conv4_out.allocator()->allocate();
        act4_out.allocator()->allocate();
        conv5_out.allocator()->allocate();
        act5_out.allocator()->allocate();
        pool5_out.allocator()->allocate();
        fc6_out.allocator()->allocate();
        act6_out.allocator()->allocate();
        fc7_out.allocator()->allocate();
        act7_out.allocator()->allocate();
        fc8_out.allocator()->allocate();
    }

    /** Fills the trainable parameters and input with random data. */
    void fill_random()
    {
        library->fill_tensor_uniform(Accessor(input), 0);

        if(!_reshaped_weights)
        {
            for(unsigned int i = 0; i < w.size(); ++i)
            {
                library->fill_tensor_uniform(Accessor(w[i]), i + 1);
                library->fill_tensor_uniform(Accessor(b[i]), i + 10);
            }
        }
        else
        {
            library->fill_tensor_uniform(Accessor(w[0]), 1);
            library->fill_tensor_uniform(Accessor(w[2]), 2);

            library->fill_tensor_uniform(Accessor(w[5]), 3);
            library->fill_tensor_uniform(Accessor(b[5]), 4);
            library->fill_tensor_uniform(Accessor(w[6]), 5);
            library->fill_tensor_uniform(Accessor(b[6]), 6);
            library->fill_tensor_uniform(Accessor(w[7]), 7);
            library->fill_tensor_uniform(Accessor(b[7]), 8);

            if(!_is_direct_conv)
            {
                library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w11.get())), 9);
                library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w12.get())), 10);
                library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w31.get())), 11);
                library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w32.get())), 12);
                library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w41.get())), 13);
                library->fill_tensor_uniform(Accessor(*dynamic_cast<TensorType *>(w42.get())), 14);
            }
            else
            {
                library->fill_tensor_uniform(Accessor(w[1]), 9);
                library->fill_tensor_uniform(Accessor(b[1]), 10);
                library->fill_tensor_uniform(Accessor(w[3]), 11);
                library->fill_tensor_uniform(Accessor(b[3]), 12);
                library->fill_tensor_uniform(Accessor(w[4]), 13);
                library->fill_tensor_uniform(Accessor(b[4]), 14);
            }
        }
    }

    /** Fills the trainable parameters from binary files
     *
     * @param weights File names containing the weights data
     * @param biases  File names containing the bias data
     */
    void fill(std::vector<std::string> weights, std::vector<std::string> biases)
    {
        ARM_COMPUTE_ERROR_ON(weights.size() != w.size());
        ARM_COMPUTE_ERROR_ON(biases.size() != b.size());
        ARM_COMPUTE_ERROR_ON(_reshaped_weights);

        for(unsigned int i = 0; i < weights.size(); ++i)
        {
            library->fill_layer_data(Accessor(w[i]), weights[i]);
            library->fill_layer_data(Accessor(b[i]), biases[i]);
        }
    }

    /** Feed input to network from file.
     *
     * @param name Name of the file containing the input data.
     */
    void feed(std::string name)
    {
        library->fill_layer_data(Accessor(input), name);
    }

    /** Get the classification results.
     *
     * @return Vector containing the classified labels
     */
    std::vector<unsigned int> get_classifications()
    {
        std::vector<unsigned int> classified_labels;
        Accessor output_accessor(output);

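        // Collapse the X dimension so the window visits each batch row exactly once,
        // then take the argmax of the classification scores along X.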
        Window window;
        window.set(Window::DimX, Window::Dimension(0, 1, 1));
        for(unsigned int d = 1; d < output_accessor.shape().num_dimensions(); ++d)
        {
            window.set(d, Window::Dimension(0, output_accessor.shape()[d], 1));
        }

        execute_window_loop(window, [&](const Coordinates & id)
        {
            int               max_idx = 0;
            float             val     = 0;
            const void *const out_ptr = output_accessor(id);
            for(unsigned int l = 0; l < output_accessor.shape().x(); ++l)
            {
                float curr_val = reinterpret_cast<const float *>(out_ptr)[l];
                if(curr_val > val)
                {
                    max_idx = l;
                    val     = curr_val;
                }
            }
            classified_labels.push_back(max_idx);
        });
        return classified_labels;
    }

    /** Clear all allocated memory from the tensor objects */
    void clear()
    {
        // Free allocations
        input.allocator()->free();
        output.allocator()->free();

        if(!_reshaped_weights)
        {
            for(auto &wi : w)
            {
                wi.allocator()->free();
            }

            for(auto &bi : b)
            {
                bi.allocator()->free();
            }
        }
        else
        {
            w[0].allocator()->free();
            w[2].allocator()->free();
            w[5].allocator()->free();
            w[6].allocator()->free();
            w[7].allocator()->free();

            b[5].allocator()->free();
            b[6].allocator()->free();
            b[7].allocator()->free();

            if(_is_direct_conv)
            {
                // Free everything the direct convolution path allocated, mirroring allocate()
                w[1].allocator()->free();
                w[3].allocator()->free();
                w[4].allocator()->free();
                b[1].allocator()->free();
                b[2].allocator()->free();
                b[3].allocator()->free();
                b[4].allocator()->free();
            }
        }

        w11.reset();
        w12.reset();
        b11.reset();
        b12.reset();
        w31.reset();
        w32.reset();
        b31.reset();
        b32.reset();
        w41.reset();
        w42.reset();
        b41.reset();
        b42.reset();

        conv1_out.allocator()->free();
        act1_out.allocator()->free();
        norm1_out.allocator()->free();
        pool1_out.allocator()->free();
        conv2_out.allocator()->free();
        act2_out.allocator()->free();
        norm2_out.allocator()->free();
        pool2_out.allocator()->free();
        conv3_out.allocator()->free();
        act3_out.allocator()->free();
        conv4_out.allocator()->free();
        act4_out.allocator()->free();
        conv5_out.allocator()->free();
        act5_out.allocator()->free();
        pool5_out.allocator()->free();
        fc6_out.allocator()->free();
        act6_out.allocator()->free();
        fc7_out.allocator()->free();
        act7_out.allocator()->free();
        fc8_out.allocator()->free();
    }

    /** Runs the model */
    void run()
    {
        // Layer 1
        conv1.run();
        act1.run();
        norm1.run();
        pool1.run();
        // Layer 2
        conv21.run();
        conv22.run();
        act2.run();
        norm2.run();
        pool2.run();
        // Layer 3
        conv3.run();
        act3.run();
        // Layer 4
        conv41.run();
        conv42.run();
        act4.run();
        // Layer 5
        conv51.run();
        conv52.run();
        act5.run();
        pool5.run();
        // Layer 6
        fc6.run();
        act6.run();
        // Layer 7
        fc7.run();
        act7.run();
        // Layer 8
        fc8.run();
        // Softmax
        smx.run();
    }

    /** Sync the results */
    void sync()
    {
        sync_if_necessary<TensorType>();
        sync_tensor_if_necessary<TensorType>(output);
    }

private:
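    /** Helper that wraps the direct convolution function. `_func` is always a
     *  DirectConvolutionLayerFunction; when that type differs from
     *  ConvolutionLayerFunction its configure() takes no WeightsInfo, so the two
     *  overloads below use SFINAE to select the matching call.
     */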
    struct DirectConv
    {
        template <typename ConvolutionLayerFunction1 = ConvolutionLayerFunction, typename DirectConvolutionLayerFunction1 = DirectConvolutionLayerFunction>
        typename std::enable_if < !std::is_same<ConvolutionLayerFunction1, DirectConvolutionLayerFunction1>::value, void >::type
        configure(ITensorType *input, const ITensorType *weights, const ITensorType *biases, ITensorType *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo())
        {
            ARM_COMPUTE_UNUSED(weights_info); // Direct convolution does not take a WeightsInfo
            _func.configure(input, weights, biases, output, conv_info);
        }

        template <typename ConvolutionLayerFunction1 = ConvolutionLayerFunction, typename DirectConvolutionLayerFunction1 = DirectConvolutionLayerFunction>
        typename std::enable_if<std::is_same<ConvolutionLayerFunction1, DirectConvolutionLayerFunction1>::value, void>::type
        configure(ITensorType *input, const ITensorType *weights, const ITensorType *biases, ITensorType *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo())
        {
            _func.configure(input, weights, biases, output, conv_info, weights_info);
        }

        void run()
        {
            _func.run();
        }

        DirectConvolutionLayerFunction _func{};
    };

    DataType     _data_type{ DataType::UNKNOWN };
    int          _fixed_point_position{ 0 };
    unsigned int _batches{ 0 };
    bool         _reshaped_weights{ false };
    bool         _is_direct_conv{ !std::is_same<ConvolutionLayerFunction, DirectConvolutionLayerFunction>::value };

    ActivationLayerFunction     act1{}, act2{}, act3{}, act4{}, act5{}, act6{}, act7{};
    ConvolutionLayerFunction    conv1{};
    DirectConv                  conv21{}, conv22{}, conv3{}, conv41{}, conv42{}, conv51{}, conv52{};
    FullyConnectedLayerFunction fc6{}, fc7{}, fc8{};
    NormalizationLayerFunction  norm1{}, norm2{};
    PoolingLayerFunction        pool1{}, pool2{}, pool5{};
    SoftmaxLayerFunction        smx{};

    TensorType input{}, output{};
    std::array<TensorType, 8> w{ {} }, b{ {} };
    std::unique_ptr<ITensorType> w11{ nullptr }, w12{ nullptr }, b11{ nullptr }, b12{ nullptr };
    std::unique_ptr<ITensorType> w31{ nullptr }, w32{ nullptr }, b31{ nullptr }, b32{ nullptr };
    std::unique_ptr<ITensorType> w41{ nullptr }, w42{ nullptr }, b41{ nullptr }, b42{ nullptr };

    TensorType conv1_out{}, act1_out{}, norm1_out{}, pool1_out{};
    TensorType conv2_out{}, act2_out{}, pool2_out{}, norm2_out{};
    TensorType conv3_out{}, act3_out{};
    TensorType conv4_out{}, act4_out{};
    TensorType conv5_out{}, act5_out{}, pool5_out{};
    TensorType fc6_out{}, act6_out{};
    TensorType fc7_out{}, act7_out{};
    TensorType fc8_out{};

    std::unique_ptr<SubTensorType> pool11_out{}, pool12_out{};
    std::unique_ptr<SubTensorType> conv21_out{}, conv22_out{};
    std::unique_ptr<SubTensorType> act31_out{}, act32_out{};
    std::unique_ptr<SubTensorType> conv41_out{}, conv42_out{}, act41_out{}, act42_out{};
    std::unique_ptr<SubTensorType> conv51_out{}, conv52_out{};
};
} // namespace networks
} // namespace test
} // namespace arm_compute
#endif //__ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__