/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/Allocator.h"
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h"
#include "arm_compute/runtime/OffsetLifetimeManager.h"
#include "arm_compute/runtime/PoolManager.h"
#include "tests/AssetsLibrary.h"
#include "tests/NEON/Accessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/UNIT/DynamicTensorFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<float> absolute_tolerance_float(0.0001f); /**< Absolute tolerance for comparing the reference output against the implementation output for DataType::F32 */
RelativeTolerance<float>           tolerance_f32(0.1f);               /**< Relative tolerance for comparing the reference output against the implementation output for DataType::F32 */
constexpr float                    tolerance_num = 0.07f;             /**< Maximum allowed percentage of mismatching values */
} // namespace
#ifndef DOXYGEN_SKIP_THIS
using NENormLayerWrapper = SimpleFunctionWrapper<MemoryManagerOnDemand, NENormalizationLayer, ITensor>;
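// Specialisation of the wrapper's configure(): the fixture only supplies src and dst, while the
// normalization parameters are fixed here to CROSS_MAP with a normalization size of 3.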
template <>
void NENormLayerWrapper::configure(arm_compute::ITensor *src, arm_compute::ITensor *dst)
{
    _func.configure(src, dst, NormalizationLayerInfo(NormType::CROSS_MAP, 3));
}
#endif // DOXYGEN_SKIP_THIS
TEST_SUITE(NEON)
TEST_SUITE(UNIT)
TEST_SUITE(DynamicTensor)

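// Memory-management service under test: the default Allocator combined with an offset-based
// lifetime manager, a pool manager and an on-demand memory manager.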
using OffsetMemoryManagementService      = MemoryManagementService<Allocator, OffsetLifetimeManager, PoolManager, MemoryManagerOnDemand>;
using NEDynamicTensorType3SingleFunction = DynamicTensorType3SingleFunction<Tensor, Accessor, OffsetMemoryManagementService, NENormLayerWrapper>;

/** Tests the memory manager with dynamic input and output tensors.
 *
 * Create and manage the tensors needed to run a simple function. After the function is executed,
 * change the input and output size, requesting more memory, and go through the manage/allocate process again.
 * The memory manager should be able to update its internal structures and allocate the requested memory.
 */
FIXTURE_DATA_TEST_CASE(DynamicTensorType3Single, NEDynamicTensorType3SingleFunction, framework::DatasetMode::ALL,
                       framework::dataset::zip(framework::dataset::make("Level0Shape", { TensorShape(12U, 11U, 3U), TensorShape(256U, 8U, 12U) }),
                                               framework::dataset::make("Level1Shape", { TensorShape(67U, 31U, 15U), TensorShape(11U, 2U, 3U) })))
{
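    // internal_* and cross_* report the sizes requested from the function-internal and cross-function
    // memory groups for the Level0 and Level1 runs. A larger second input is expected to grow both
    // pools; otherwise the pool sizes should stay unchanged.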
    if(input_l0.total_size() < input_l1.total_size())
    {
        ARM_COMPUTE_EXPECT(internal_l0.size < internal_l1.size, framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(cross_l0.size < cross_l1.size, framework::LogLevel::ERRORS);
    }
    else
    {
        ARM_COMPUTE_EXPECT(internal_l0.size == internal_l1.size, framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(cross_l0.size == cross_l1.size, framework::LogLevel::ERRORS);
    }
}

using NEDynamicTensorType3ComplexFunction = DynamicTensorType3ComplexFunction<Tensor, Accessor, OffsetMemoryManagementService, NEConvolutionLayer>;
/** Tests the memory manager with dynamic input and output tensors.
 *
 * Create and manage the tensors needed to run a complex function. After the function is executed,
 * change the input and output size, requesting more memory, and go through the manage/allocate process again.
 * The memory manager should be able to update its internal structures and allocate the requested memory.
 */
FIXTURE_DATA_TEST_CASE(DynamicTensorType3Complex, NEDynamicTensorType3ComplexFunction, framework::DatasetMode::ALL,
                       framework::dataset::zip(framework::dataset::zip(framework::dataset::zip(framework::dataset::zip(
                                                                                                   framework::dataset::make("InputShape", { std::vector<TensorShape>{ TensorShape(12U, 12U, 6U), TensorShape(128U, 128U, 6U) } }),
                                                                                                   framework::dataset::make("WeightsShape", { TensorShape(3U, 3U, 6U, 3U) })),
                                                                                               framework::dataset::make("BiasShape", { TensorShape(3U) })),
                                                                                           framework::dataset::make("OutputShape", { std::vector<TensorShape>{ TensorShape(12U, 12U, 3U), TensorShape(128U, 128U, 3U) } })),
                                                                       framework::dataset::make("PadStrideInfo", { PadStrideInfo(1U, 1U, 1U, 1U) })))
{
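    // Each iteration reconfigures the convolution for the next input/output shape pair, runs it, and
    // validates the result against the reference implementation using the tolerances defined above.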
    for(unsigned int i = 0; i < num_iterations; ++i)
    {
        run_iteration(i);
        validate(Accessor(dst_target), dst_ref, tolerance_f32, tolerance_num, absolute_tolerance_float);
    }
}

using NEDynamicTensorType2PipelineFunction = DynamicTensorType2PipelineFunction<Tensor, Accessor, OffsetMemoryManagementService, NEConvolutionLayer>;
/** Tests the memory manager with dynamic input and output tensors.
 *
 * Create and manage the tensors needed to run a pipeline. After the pipeline is executed, resize the input and rerun.
 */
FIXTURE_DATA_TEST_CASE(DynamicTensorType2Pipeline, NEDynamicTensorType2PipelineFunction, framework::DatasetMode::ALL,
                       framework::dataset::make("InputShape", { std::vector<TensorShape>{ TensorShape(12U, 12U, 6U), TensorShape(128U, 128U, 6U) } }))
{
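    // Intentionally empty: building the pipeline, running it, resizing the input and rerunning it
    // all happen inside the fixture.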
}
TEST_SUITE_END() // DynamicTensor
TEST_SUITE_END() // UNIT
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute