/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_LENET5_H__
#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_LENET5_H__

#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/Utils.h"

#include <array>
#include <memory>
#include <random>
#include <string>
#include <vector>

using namespace arm_compute;
using namespace arm_compute::test;

namespace arm_compute
{
namespace test
{
namespace networks
{
/** LeNet5 model object */
template <typename TensorType,
          typename Accessor,
          typename ActivationLayerFunction,
          typename ConvolutionLayerFunction,
          typename FullyConnectedLayerFunction,
          typename PoolingLayerFunction,
          typename SoftmaxLayerFunction>
class LeNet5Network
{
public:
    /** Initialize the network.
     *
     * @param[in] batches Number of batches.
     */
    void init(int batches)
    {
        _batches = batches;

        // Initialize input, output, weights and biases
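        // Note: TensorShape lists dimensions innermost first, i.e. [width, height,
        // channels, batches], so the input is a batch of 28x28 single-channel images.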
        input.allocator()->init(TensorInfo(TensorShape(28U, 28U, 1U, _batches), 1, DataType::F32));
        output.allocator()->init(TensorInfo(TensorShape(10U, _batches), 1, DataType::F32));
        w[0].allocator()->init(TensorInfo(TensorShape(5U, 5U, 1U, 20U), 1, DataType::F32));
        b[0].allocator()->init(TensorInfo(TensorShape(20U), 1, DataType::F32));
        w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 20U, 50U), 1, DataType::F32));
        b[1].allocator()->init(TensorInfo(TensorShape(50U), 1, DataType::F32));
        w[2].allocator()->init(TensorInfo(TensorShape(800U, 500U), 1, DataType::F32));
        b[2].allocator()->init(TensorInfo(TensorShape(500U), 1, DataType::F32));
        w[3].allocator()->init(TensorInfo(TensorShape(500U, 10U), 1, DataType::F32));
        b[3].allocator()->init(TensorInfo(TensorShape(10U), 1, DataType::F32));
    }

    /** Build the model. */
    void build()
    {
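        // Architecture (inferred from the tensor shapes used below):
        //   input 28x28x1 -> conv1 5x5, 20 maps -> 24x24x20 -> maxpool 2x2 -> 12x12x20
        //   -> conv2 5x5, 50 maps -> 8x8x50 -> maxpool 2x2 -> 4x4x50 (800 flattened)
        //   -> fc1 500 -> ReLU -> fc2 10 -> softmax
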
        // Initialize intermediate tensors
        // Layer 1
        conv1_out.allocator()->init(TensorInfo(TensorShape(24U, 24U, 20U, _batches), 1, DataType::F32));
        pool1_out.allocator()->init(TensorInfo(TensorShape(12U, 12U, 20U, _batches), 1, DataType::F32));
        // Layer 2
        conv2_out.allocator()->init(TensorInfo(TensorShape(8U, 8U, 50U, _batches), 1, DataType::F32));
        pool2_out.allocator()->init(TensorInfo(TensorShape(4U, 4U, 50U, _batches), 1, DataType::F32));
        // Layer 3
        fc1_out.allocator()->init(TensorInfo(TensorShape(500U, _batches), 1, DataType::F32));
        act1_out.allocator()->init(TensorInfo(TensorShape(500U, _batches), 1, DataType::F32));
        // Layer 4
        fc2_out.allocator()->init(TensorInfo(TensorShape(10U, _batches), 1, DataType::F32));

        // Configure layers
        conv1.configure(&input, &w[0], &b[0], &conv1_out, PadStrideInfo(1, 1, 0, 0));
        pool1.configure(&conv1_out, &pool1_out, PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)));
        conv2.configure(&pool1_out, &w[1], &b[1], &conv2_out, PadStrideInfo(1, 1, 0, 0));
        pool2.configure(&conv2_out, &pool2_out, PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)));
        fc1.configure(&pool2_out, &w[2], &b[2], &fc1_out);
        act1.configure(&fc1_out, &act1_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        fc2.configure(&act1_out, &w[3], &b[3], &fc2_out);
        smx.configure(&fc2_out, &output);
    }

    /** Allocate the network */
    void allocate()
    {
        // Allocate tensors
        input.allocator()->allocate();
        output.allocator()->allocate();
        for(auto &wi : w)
        {
            wi.allocator()->allocate();
        }
        for(auto &bi : b)
        {
            bi.allocator()->allocate();
        }
        conv1_out.allocator()->allocate();
        pool1_out.allocator()->allocate();
        conv2_out.allocator()->allocate();
        pool2_out.allocator()->allocate();
        fc1_out.allocator()->allocate();
        act1_out.allocator()->allocate();
        fc2_out.allocator()->allocate();
    }

    /** Fills the trainable parameters and input with random data. */
    void fill_random()
    {
        std::uniform_real_distribution<> distribution(-1, 1);
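        // Distinct fixed seeds (0 for the input, i + 1 / i + 10 per layer) keep the
        // fills deterministic across runs while giving each tensor different data.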
        library->fill(Accessor(input), distribution, 0);
        for(unsigned int i = 0; i < w.size(); ++i)
        {
            library->fill(Accessor(w[i]), distribution, i + 1);
            library->fill(Accessor(b[i]), distribution, i + 10);
        }
    }

    /** Fills the trainable parameters from binary files.
     *
     * @param[in] weights Names of the files containing the weights data.
     * @param[in] biases  Names of the files containing the bias data.
     */
    void fill(std::vector<std::string> weights, std::vector<std::string> biases)
    {
        ARM_COMPUTE_ERROR_ON(weights.size() != w.size());
        ARM_COMPUTE_ERROR_ON(biases.size() != b.size());

        for(unsigned int i = 0; i < weights.size(); ++i)
        {
            library->fill_layer_data(Accessor(w[i]), weights[i]);
            library->fill_layer_data(Accessor(b[i]), biases[i]);
        }
    }

    /** Feed input to the network from a file.
     *
     * @param[in] name Name of the file containing the input data.
     */
    void feed(std::string name)
    {
        library->fill_layer_data(Accessor(input), name);
    }

    /** Get the classification results.
     *
     * @return Vector containing the classified labels
     */
    std::vector<unsigned int> get_classifications()
    {
        std::vector<unsigned int> classified_labels;
        Accessor output_accessor(output);

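        // Iterate over every dimension except the innermost (x), i.e. once per image
        // in the batch, and take the argmax over the 10 class scores along x.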
        Window window;
        window.set(Window::DimX, Window::Dimension(0, 1, 1));
        for(unsigned int d = 1; d < output_accessor.shape().num_dimensions(); ++d)
        {
            window.set(d, Window::Dimension(0, output_accessor.shape()[d], 1));
        }

        execute_window_loop(window, [&](const Coordinates & id)
        {
            unsigned int max_idx = 0;
            float        val     = 0.f;
            const void *const out_ptr = output_accessor(id);
            for(unsigned int l = 0; l < output_accessor.shape().x(); ++l)
            {
                float curr_val = reinterpret_cast<const float *>(out_ptr)[l];
                if(curr_val > val)
                {
                    max_idx = l;
                    val     = curr_val;
                }
            }
            classified_labels.push_back(max_idx);
        });
        return classified_labels;
    }

    /** Clear all allocated memory from the tensor objects */
    void clear()
    {
        input.allocator()->free();
        output.allocator()->free();
        for(auto &wi : w)
        {
            wi.allocator()->free();
        }
        for(auto &bi : b)
        {
            bi.allocator()->free();
        }

        conv1_out.allocator()->free();
        pool1_out.allocator()->free();
        conv2_out.allocator()->free();
        pool2_out.allocator()->free();
        fc1_out.allocator()->free();
        act1_out.allocator()->free();
        fc2_out.allocator()->free();
    }

    /** Runs the model */
    void run()
    {
        // Layer 1
        conv1.run();
        pool1.run();
        // Layer 2
        conv2.run();
        pool2.run();
        // Layer 3
        fc1.run();
        act1.run();
        // Layer 4
        fc2.run();
        // Softmax
        smx.run();
    }

    /** Sync the results */
    void sync()
    {
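        // For asynchronous backends this flushes the queue and waits for the output
        // to be ready; for synchronous backends it is a no-op.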
        sync_if_necessary<TensorType>();
        sync_tensor_if_necessary<TensorType>(output);
    }

private:
    unsigned int _batches{ 0 };

    ActivationLayerFunction     act1{};
    ConvolutionLayerFunction    conv1{}, conv2{};
    FullyConnectedLayerFunction fc1{}, fc2{};
    PoolingLayerFunction        pool1{}, pool2{};
    SoftmaxLayerFunction        smx{};

    TensorType input{}, output{};
    std::array<TensorType, 4> w{ {} }, b{ {} };

    TensorType conv1_out{}, pool1_out{};
    TensorType conv2_out{}, pool2_out{};
    TensorType fc1_out{}, act1_out{};
    TensorType fc2_out{};
};
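
// A minimal usage sketch (assumptions: the NEON backend types Tensor, Accessor,
// NEActivationLayer, NEConvolutionLayer, NEFullyConnectedLayer, NEPoolingLayer
// and NESoftmaxLayer; any backend whose functions share these interfaces can be
// substituted):
//
//   using LeNet5 = LeNet5Network<Tensor,
//                                Accessor,
//                                NEActivationLayer,
//                                NEConvolutionLayer,
//                                NEFullyConnectedLayer,
//                                NEPoolingLayer,
//                                NESoftmaxLayer>;
//   LeNet5 network{};
//   network.init(1);       // single batch
//   network.build();       // configure the layers
//   network.allocate();    // back all tensors with memory
//   network.fill_random(); // or fill()/feed() to load data from files
//   network.run();
//   network.sync();
//   std::vector<unsigned int> labels = network.get_classifications();
//   network.clear();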
} // namespace networks
} // namespace test
} // namespace arm_compute
#endif //__ARM_COMPUTE_TEST_MODEL_OBJECTS_LENET5_H__