blob: 08b90c5b52dc178e0957491485912f4ebd99c0b6 [file] [log] [blame]
Michalis Spyroucaa7dee2019-09-09 19:23:39 +01001/*
2 * Copyright (c) 2019 ARM Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef ARM_COMPUTE_TEST_UNIT_DYNAMIC_TENSOR
25#define ARM_COMPUTE_TEST_UNIT_DYNAMIC_TENSOR
26
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/NormalizationLayer.h"

#include <memory>
#include <utility>
37
38namespace arm_compute
39{
40namespace test
41{
42namespace validation
43{
44namespace
45{
46template <typename AllocatorType,
47 typename LifetimeMgrType,
48 typename PoolMgrType,
49 typename MemoryMgrType>
50struct MemoryManagementService
51{
52public:
Georgios Pinitas2ff00092019-09-30 16:50:08 +010053 using LftMgrType = LifetimeMgrType;
54
55public:
Michalis Spyroucaa7dee2019-09-09 19:23:39 +010056 MemoryManagementService()
57 : allocator(), lifetime_mgr(nullptr), pool_mgr(nullptr), mm(nullptr), mg(), num_pools(0)
58 {
59 lifetime_mgr = std::make_shared<LifetimeMgrType>();
60 pool_mgr = std::make_shared<PoolMgrType>();
61 mm = std::make_shared<MemoryMgrType>(lifetime_mgr, pool_mgr);
62 mg = MemoryGroup(mm);
63 }
64
65 void populate(size_t pools)
66 {
67 mm->populate(allocator, pools);
68 num_pools = pools;
69 }
70
71 void clear()
72 {
73 mm->clear();
74 num_pools = 0;
75 }
76
77 void validate(bool validate_finalized) const
78 {
79 ARM_COMPUTE_EXPECT(mm->pool_manager() != nullptr, framework::LogLevel::ERRORS);
80 ARM_COMPUTE_EXPECT(mm->lifetime_manager() != nullptr, framework::LogLevel::ERRORS);
81
82 if(validate_finalized)
83 {
84 ARM_COMPUTE_EXPECT(mm->lifetime_manager()->are_all_finalized(), framework::LogLevel::ERRORS);
85 }
86 ARM_COMPUTE_EXPECT(mm->pool_manager()->num_pools() == num_pools, framework::LogLevel::ERRORS);
87 }
88
89 AllocatorType allocator;
90 std::shared_ptr<LifetimeMgrType> lifetime_mgr;
91 std::shared_ptr<PoolMgrType> pool_mgr;
92 std::shared_ptr<MemoryMgrType> mm;
93 MemoryGroup mg;
94 size_t num_pools;
95};
Georgios Pinitasb785dd42019-09-19 12:09:32 +010096
/** Minimal adaptor exposing the configure/run interface the dynamic-tensor
 *  fixtures expect on top of a memory-managed function.
 *
 *  configure() intentionally does nothing with its arguments here
 *  (NOTE(review): the fixture drives shape changes itself — confirm the
 *  wrapped FuncType needs no per-tensor configuration in these tests).
 */
template <typename MemoryMgrType, typename FuncType, typename ITensorType>
class SimpleFunctionWrapper
{
public:
    /** Constructor
     *
     * @param[in] mm Memory manager handed to the wrapped function.
     */
    // explicit: a shared_ptr must never silently convert into a wrapper;
    // std::move avoids an extra atomic refcount increment on the shared_ptr.
    explicit SimpleFunctionWrapper(std::shared_ptr<MemoryMgrType> mm)
        : _func(std::move(mm))
    {
    }
    /** No-op configuration hook kept so the wrapper matches the interface the fixtures call. */
    void configure(ITensorType *src, ITensorType *dst)
    {
        ARM_COMPUTE_UNUSED(src, dst);
    }
    /** Run the wrapped function. */
    void run()
    {
        _func.run();
    }

private:
    FuncType _func; /**< Wrapped memory-managed function */
};
Michalis Spyroucaa7dee2019-09-09 19:23:39 +0100117} // namespace
118
/** Simple test case to run a single function with different shapes twice.
 *
 * Runs a specified function twice, where the second time the size of the input/output is different
 * Internal memory of the function and input/output are managed by different services
 */
template <typename TensorType,
          typename AccessorType,
          typename MemoryManagementServiceType,
          typename SimpleFunctionWrapperType>
class DynamicTensorType3SingleFunction : public framework::Fixture
{
    using T = float;

public:
    /** Record the two shapes and immediately execute both levels.
     *
     * @param[in] input_level0 Shape used for the first run.
     * @param[in] input_level1 Shape used for the second run (tensors are resized to it in place).
     */
    template <typename...>
    void setup(TensorShape input_level0, TensorShape input_level1)
    {
        input_l0 = input_level0;
        input_l1 = input_level1;
        run();
    }

protected:
    void run()
    {
        // Two independent services: one for the function's internal memory,
        // one for the cross (input/output) tensors.
        MemoryManagementServiceType serv_internal;
        MemoryManagementServiceType serv_cross;
        const size_t                num_pools          = 1;
        const bool                  validate_finalized = true;

        // Create Tensor shapes.
        TensorShape level_0 = TensorShape(input_l0);
        TensorShape level_1 = TensorShape(input_l1);

        // Level 0
        // Create tensors
        TensorType src = create_tensor<TensorType>(level_0, DataType::F32, 1);
        TensorType dst = create_tensor<TensorType>(level_0, DataType::F32, 1);

        // Hand input/output lifetime to the cross-memory group before configuration.
        serv_cross.mg.manage(&src);
        serv_cross.mg.manage(&dst);

        // Create and configure function
        SimpleFunctionWrapperType layer(serv_internal.mm);
        layer.configure(&src, &dst);

        // Tensors must still be resizable until allocation happens.
        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Populate and validate memory manager
        serv_cross.populate(num_pools);
        serv_internal.populate(num_pools);
        serv_cross.validate(validate_finalized);
        serv_internal.validate(validate_finalized);

        // Extract lifetime manager meta-data information
        internal_l0 = serv_internal.lifetime_mgr->info();
        cross_l0    = serv_cross.lifetime_mgr->info();

        // Acquire memory manager, fill tensors and compute functions
        serv_cross.mg.acquire();
        arm_compute::test::library->fill_tensor_value(AccessorType(src), 12.f);
        layer.run();
        serv_cross.mg.release();

        // Clear manager
        serv_cross.clear();
        serv_internal.clear();
        serv_cross.validate(validate_finalized);
        serv_internal.validate(validate_finalized);

        // Level 1
        // Update the tensor shapes in place and mark them resizable again,
        // exercising the dynamic-tensor path (no new tensor objects are made).
        src.info()->set_tensor_shape(level_1);
        dst.info()->set_tensor_shape(level_1);
        src.info()->set_is_resizable(true);
        dst.info()->set_is_resizable(true);

        serv_cross.mg.manage(&src);
        serv_cross.mg.manage(&dst);

        // Re-configure the function
        layer.configure(&src, &dst);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        // Populate and validate memory manager
        serv_cross.populate(num_pools);
        serv_internal.populate(num_pools);
        serv_cross.validate(validate_finalized);
        serv_internal.validate(validate_finalized);

        // Extract lifetime manager meta-data information
        internal_l1 = serv_internal.lifetime_mgr->info();
        cross_l1    = serv_cross.lifetime_mgr->info();

        // Compute functions
        serv_cross.mg.acquire();
        arm_compute::test::library->fill_tensor_value(AccessorType(src), 12.f);
        layer.run();
        serv_cross.mg.release();

        // Clear manager
        serv_cross.clear();
        serv_internal.clear();
        serv_cross.validate(validate_finalized);
        serv_internal.validate(validate_finalized);
    }

public:
    TensorShape input_l0{}, input_l1{}; // Shapes for level 0 and level 1 runs
    // Lifetime-manager snapshots taken after populate() at each level, so the
    // test body can compare internal vs cross memory requirements per level.
    typename MemoryManagementServiceType::LftMgrType::info_type internal_l0{}, internal_l1{};
    typename MemoryManagementServiceType::LftMgrType::info_type cross_l0{}, cross_l1{};
};
242
/** Simple test case to run a single function with different shapes twice.
 *
 * Runs a specified function twice, where the second time the size of the input/output is different
 * Internal memory of the function and input/output are managed by different services
 */
template <typename TensorType,
          typename AccessorType,
          typename MemoryManagementServiceType,
          typename ComplexFunctionType>
class DynamicTensorType3ComplexFunction : public framework::Fixture
{
    using T = float;

public:
    /** Store the per-iteration shapes and construct the function under test.
     *
     * @param[in] input_shapes  One input shape per iteration.
     * @param[in] weights_shape Convolution weights shape (shared across iterations).
     * @param[in] bias_shape    Bias shape (shared across iterations).
     * @param[in] output_shapes One output shape per iteration (parallel to @p input_shapes).
     * @param[in] info          Padding/stride configuration.
     */
    template <typename...>
    void setup(std::vector<TensorShape> input_shapes, TensorShape weights_shape, TensorShape bias_shape, std::vector<TensorShape> output_shapes, PadStrideInfo info)
    {
        num_iterations = input_shapes.size();
        _data_type     = DataType::F32;
        _data_layout   = DataLayout::NHWC;
        _input_shapes  = input_shapes;
        _output_shapes = output_shapes;
        _weights_shape = weights_shape;
        _bias_shape    = bias_shape;
        _info          = info;

        // Create function
        _f_target = support::cpp14::make_unique<ComplexFunctionType>(_ms.mm);
    }

    /** Run the reference and target paths for iteration @p idx, storing both
     *  results (dst_ref / dst_target) for the caller to compare. */
    void run_iteration(unsigned int idx)
    {
        auto input_shape  = _input_shapes[idx];
        auto output_shape = _output_shapes[idx];

        dst_ref    = run_reference(input_shape, _weights_shape, _bias_shape, output_shape, _info);
        dst_target = run_target(input_shape, _weights_shape, _bias_shape, output_shape, _info, WeightsInfo());
    }

protected:
    // Fill @p tensor with reproducible data seeded by @p i, so reference and
    // target tensors filled with the same index receive identical values.
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                // NOTE(review): uniform_real_distribution<> defaults to double;
                // values are presumably narrowed to float on fill — confirm
                // this matches other F32 fixtures in the suite.
                std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    /** Execute the function under test once with the given shapes and return its output tensor. */
    TensorType run_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape,
                          PadStrideInfo info, WeightsInfo weights_info)
    {
        // Shapes arrive in NCHW order; permute them when the target runs NHWC.
        if(_data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        _weights_target = create_tensor<TensorType>(weights_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        _bias_target    = create_tensor<TensorType>(bias_shape, _data_type, 1);

        // Create tensors
        TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, QuantizationInfo(), _data_layout);

        // Create and configure function
        _f_target->configure(&src, &_weights_target, &_bias_target, &dst, info, weights_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();
        _weights_target.allocator()->allocate();
        _bias_target.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors (seeds 0/1/2 match run_reference so both paths see the same data)
        fill(AccessorType(src), 0);
        fill(AccessorType(_weights_target), 1);
        fill(AccessorType(_bias_target), 2);

        // Populate and validate memory manager; clear first so repeated
        // iterations re-populate against the current shapes.
        _ms.clear();
        _ms.populate(1);
        _ms.mg.acquire();

        // Compute NEConvolutionLayer function
        _f_target->run();
        _ms.mg.release();

        return dst;
    }

    /** Compute the reference convolution for the given shapes. */
    SimpleTensor<T> run_reference(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info)
    {
        // Create reference (reference tensors stay in the default layout; no permute needed)
        SimpleTensor<T> src{ input_shape, _data_type, 1 };
        SimpleTensor<T> weights{ weights_shape, _data_type, 1 };
        SimpleTensor<T> bias{ bias_shape, _data_type, 1 };

        // Fill reference
        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        return reference::convolution_layer<T>(src, weights, bias, output_shape, info);
    }

public:
    unsigned int    num_iterations{ 0 }; // Number of shape pairs provided to setup()
    SimpleTensor<T> dst_ref{};           // Output of the most recent reference run
    TensorType      dst_target{};        // Output of the most recent target run

private:
    DataType                             _data_type{ DataType::UNKNOWN };
    DataLayout                           _data_layout{ DataLayout::UNKNOWN };
    PadStrideInfo                        _info{};
    std::vector<TensorShape>             _input_shapes{};
    std::vector<TensorShape>             _output_shapes{};
    TensorShape                          _weights_shape{};
    TensorShape                          _bias_shape{};
    MemoryManagementServiceType          _ms{};             // Memory service feeding _f_target's memory manager
    TensorType                           _weights_target{}; // Kept as members: the function holds pointers to them across run()
    TensorType                           _bias_target{};
    std::unique_ptr<ComplexFunctionType> _f_target{};
};
381} // namespace validation
382} // namespace test
383} // namespace arm_compute
384#endif /* ARM_COMPUTE_TEST_UNIT_DYNAMIC_TENSOR */