/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_LENET5_H__
#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_LENET5_H__

#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/Utils.h"

#include <memory>
#include <random>

using namespace arm_compute;
using namespace arm_compute::test;

namespace arm_compute
{
namespace test
{
namespace networks
{
/** LeNet5 model object */
template <typename TensorType,
          typename Accessor,
          typename ActivationLayerFunction,
          typename ConvolutionLayerFunction,
          typename FullyConnectedLayerFunction,
          typename PoolingLayerFunction,
          typename SoftmaxLayerFunction>
class LeNet5Network
{
public:
    /** Initialize the model's input, output and trainable parameters.
     *
     * @param batches Number of batches the model processes.
     */
    void init(int batches)
    {
        _batches = batches;

        // Initialize input, output, weights and biases
        input.allocator()->init(TensorInfo(TensorShape(28U, 28U, 1U, _batches), 1, DataType::F32));
        output.allocator()->init(TensorInfo(TensorShape(10U, _batches), 1, DataType::F32));
        w[0].allocator()->init(TensorInfo(TensorShape(5U, 5U, 1U, 20U), 1, DataType::F32));
        b[0].allocator()->init(TensorInfo(TensorShape(20U), 1, DataType::F32));
        w[1].allocator()->init(TensorInfo(TensorShape(5U, 5U, 20U, 50U), 1, DataType::F32));
        b[1].allocator()->init(TensorInfo(TensorShape(50U), 1, DataType::F32));
        w[2].allocator()->init(TensorInfo(TensorShape(800U, 500U), 1, DataType::F32));
        b[2].allocator()->init(TensorInfo(TensorShape(500U), 1, DataType::F32));
        w[3].allocator()->init(TensorInfo(TensorShape(500U, 10U), 1, DataType::F32));
        b[3].allocator()->init(TensorInfo(TensorShape(10U), 1, DataType::F32));
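
        // Note: TensorShape lists dimensions innermost first, i.e. (width, height,
        // channels, batches) for activations and (kernel_w, kernel_h, ifm, ofm) for
        // convolution weights; fully connected weights are (inputs, outputs). The
        // 800 inputs of the first fully connected layer correspond to the flattened
        // 4x4x50 output of the second pooling layer (4 * 4 * 50 = 800).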
    }

    /** Build the model. */
    void build()
    {
        // Initialize intermediate tensors
        // Layer 1: 28x28x1 -> 24x24x20 (5x5 convolution) -> 12x12x20 (2x2 max pooling)
        conv1_out.allocator()->init(TensorInfo(TensorShape(24U, 24U, 20U, _batches), 1, DataType::F32));
        pool1_out.allocator()->init(TensorInfo(TensorShape(12U, 12U, 20U, _batches), 1, DataType::F32));
        // Layer 2: 12x12x20 -> 8x8x50 (5x5 convolution) -> 4x4x50 (2x2 max pooling)
        conv2_out.allocator()->init(TensorInfo(TensorShape(8U, 8U, 50U, _batches), 1, DataType::F32));
        pool2_out.allocator()->init(TensorInfo(TensorShape(4U, 4U, 50U, _batches), 1, DataType::F32));
        // Layer 3: 800 -> 500 (fully connected), followed by ReLU
        fc1_out.allocator()->init(TensorInfo(TensorShape(500U, _batches), 1, DataType::F32));
        act1_out.allocator()->init(TensorInfo(TensorShape(500U, _batches), 1, DataType::F32));
        // Layer 4: 500 -> 10 (fully connected)
        fc2_out.allocator()->init(TensorInfo(TensorShape(10U, _batches), 1, DataType::F32));

        // Configure layers
        conv1.configure(&input, &w[0], &b[0], &conv1_out, PadStrideInfo(1, 1, 0, 0));
        pool1.configure(&conv1_out, &pool1_out, PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)));
        conv2.configure(&pool1_out, &w[1], &b[1], &conv2_out, PadStrideInfo(1, 1, 0, 0));
        pool2.configure(&conv2_out, &pool2_out, PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)));
        fc1.configure(&pool2_out, &w[2], &b[2], &fc1_out);
        act1.configure(&fc1_out, &act1_out, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
        fc2.configure(&act1_out, &w[3], &b[3], &fc2_out);
        smx.configure(&fc2_out, &output);
    }

    /** Allocate all the model's tensors */
    void allocate()
    {
        // Allocate tensors
        input.allocator()->allocate();
        output.allocator()->allocate();
        for(auto &wi : w)
        {
            wi.allocator()->allocate();
        }
        for(auto &bi : b)
        {
            bi.allocator()->allocate();
        }
        conv1_out.allocator()->allocate();
        pool1_out.allocator()->allocate();
        conv2_out.allocator()->allocate();
        pool2_out.allocator()->allocate();
        fc1_out.allocator()->allocate();
        act1_out.allocator()->allocate();
        fc2_out.allocator()->allocate();
    }

    /** Fills the trainable parameters and input with random data. */
    void fill_random()
    {
        // Use a distinct seed per tensor so the fills are reproducible but not identical
        std::uniform_real_distribution<> distribution(-1, 1);
        library->fill(Accessor(input), distribution, 0);
        for(unsigned int i = 0; i < w.size(); ++i)
        {
            library->fill(Accessor(w[i]), distribution, i + 1);
            library->fill(Accessor(b[i]), distribution, i + 10);
        }
    }

    /** Fills the trainable parameters from binary files.
     *
     * @param weights Names of the files containing the weights data
     * @param biases  Names of the files containing the bias data
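     *
     * A minimal call sketch (the file names here are hypothetical; each file
     * must hold the raw data for the corresponding tensor):
     * @code
     * net.fill({ "conv1_w.dat", "conv2_w.dat", "fc1_w.dat", "fc2_w.dat" },
     *          { "conv1_b.dat", "conv2_b.dat", "fc1_b.dat", "fc2_b.dat" });
     * @endcode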
     */
    void fill(std::vector<std::string> weights, std::vector<std::string> biases)
    {
        ARM_COMPUTE_ERROR_ON(weights.size() != w.size());
        ARM_COMPUTE_ERROR_ON(biases.size() != b.size());

        for(unsigned int i = 0; i < weights.size(); ++i)
        {
            library->fill_layer_data(Accessor(w[i]), weights[i]);
            library->fill_layer_data(Accessor(b[i]), biases[i]);
        }
    }

    /** Feed input to network from file.
     *
     * @param name Name of the file containing the input data.
     */
    void feed(std::string name)
    {
        library->fill_layer_data(Accessor(input), name);
    }

    /** Get the classification results.
     *
     * @return Vector containing the classified labels
     */
    std::vector<unsigned int> get_classifications()
    {
        std::vector<unsigned int> classified_labels;
        Accessor output_accessor(output);

        // Iterate over every batch item; the class scores live along dimension X
        Window window;
        window.set(Window::DimX, Window::Dimension(0, 1, 1));
        for(unsigned int d = 1; d < output_accessor.shape().num_dimensions(); ++d)
        {
            window.set(d, Window::Dimension(0, output_accessor.shape()[d], 1));
        }

        execute_window_loop(window, [&](const Coordinates & id)
        {
            // Argmax over the class scores; starting the running maximum at 0 is
            // safe because the softmax output is non-negative
            unsigned int max_idx = 0;
            float val = 0;
            const void *const out_ptr = output_accessor(id);
            for(unsigned int l = 0; l < output_accessor.shape().x(); ++l)
            {
                const float curr_val = reinterpret_cast<const float *>(out_ptr)[l];
                if(curr_val > val)
                {
                    max_idx = l;
                    val = curr_val;
                }
            }
            classified_labels.push_back(max_idx);
        });
        return classified_labels;
    }

    /** Clear all allocated memory from the tensor objects */
    void clear()
    {
        input.allocator()->free();
        output.allocator()->free();
        for(auto &wi : w)
        {
            wi.allocator()->free();
        }
        for(auto &bi : b)
        {
            bi.allocator()->free();
        }

        conv1_out.allocator()->free();
        pool1_out.allocator()->free();
        conv2_out.allocator()->free();
        pool2_out.allocator()->free();
        fc1_out.allocator()->free();
        act1_out.allocator()->free();
        fc2_out.allocator()->free();
    }

    /** Runs the model */
    void run()
    {
        // Layer 1
        conv1.run();
        pool1.run();
        // Layer 2
        conv2.run();
        pool2.run();
        // Layer 3
        fc1.run();
        act1.run();
        // Layer 4
        fc2.run();
        // Softmax
        smx.run();
    }

private:
    unsigned int _batches{ 0 };

    ActivationLayerFunction     act1{};
    ConvolutionLayerFunction    conv1{}, conv2{};
    FullyConnectedLayerFunction fc1{}, fc2{};
    PoolingLayerFunction        pool1{}, pool2{};
    SoftmaxLayerFunction        smx{};

    TensorType input{}, output{};
    std::array<TensorType, 4> w{ {} }, b{ {} };

    TensorType conv1_out{}, pool1_out{};
    TensorType conv2_out{}, pool2_out{};
    TensorType fc1_out{}, act1_out{};
    TensorType fc2_out{};
};
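
// A minimal usage sketch, assuming the NEON backend types from this library's
// tests (Tensor, the tests' NEON Accessor, and the NE* layer functions); the
// CL backend works the same way with its corresponding types. The weight and
// bias file names passed to fill() would be supplied by the caller.
//
//   using LeNet5 = networks::LeNet5Network<Tensor,
//                                          Accessor,
//                                          NEActivationLayer,
//                                          NEConvolutionLayer,
//                                          NEFullyConnectedLayer,
//                                          NEPoolingLayer,
//                                          NESoftmaxLayer>;
//   LeNet5 net;
//   net.init(10);      // 10 batches
//   net.build();
//   net.allocate();
//   net.fill_random(); // or net.fill(weight_files, bias_files)
//   net.run();
//   std::vector<unsigned int> labels = net.get_classifications();
//   net.clear();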
} // namespace networks
} // namespace test
} // namespace arm_compute
#endif //__ARM_COMPUTE_TEST_MODEL_OBJECTS_LENET5_H__