/*
 * Copyright (c) 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE
#define TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
#include "arm_compute/dynamic_fusion/sketch/attributes/Pool2dAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h"
#include "src/dynamic_fusion/utils/Utils.h"

#include "tests/CL/CLAccessor.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/reference/PoolingLayer.h"

using namespace arm_compute::experimental::dynamic_fusion;

namespace arm_compute
{
namespace test
{
namespace validation
{
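/** Generic fixture for validating the dynamic fusion GpuPool2d operator on the CL backend.
 *
 * compute_target() builds a single-operator workload through the GpuWorkloadSketch /
 * ClWorkloadRuntime API and runs it on the device; compute_reference() runs the reference
 * pooling implementation on the same randomly filled input for comparison.
 */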
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DynamicFusionGpuPool2dValidationGenericFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, const Pool2dAttributes &pool_attr, DataType data_type, bool mixed_precision)
    {
        _target    = compute_target(input_shape, pool_attr, data_type, mixed_precision);
        _reference = compute_reference(input_shape, convert_pool_attr_to_pool_info(pool_attr, mixed_precision), data_type);
    }

protected:
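    // Fill a tensor with uniform random values (in [-1, 1] for floating-point types); the offset i seeds the fill so it is reproducible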
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    // Given input is in NCHW format
    TensorType compute_target(TensorShape input_shape, const Pool2dAttributes &pool_attr, const DataType data_type, bool mixed_precision)
    {
        CLScheduler::get().default_reinit();

        // Test shapes are given in NCHW; permute them to NHWC, the data layout used by the workload
        permute(input_shape, PermutationVector(2U, 0U, 1U));

        // Create a new workload sketch
        auto              cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
        auto              gpu_ctx        = GpuWorkloadContext{ &cl_compile_ctx };
        GpuWorkloadSketch sketch{ &gpu_ctx };

        // Create sketch tensors
        auto input_info = sketch.create_tensor_info(TensorInfo(input_shape, 1, data_type, DataLayout::NHWC));
        auto dst_info   = sketch.create_tensor_info();

        // Create Pool2dSettings
        GpuPool2dSettings pool_settings = GpuPool2dSettings().mixed_precision(mixed_precision);

        FunctionType::create_op(sketch, &input_info, &dst_info, pool_attr, pool_settings);

        // Configure runtime
        ClWorkloadRuntime runtime;
        runtime.configure(sketch);
        // (Important) Allocate auxiliary tensor memory, if any auxiliary tensors exist
        for(auto &data : runtime.get_auxiliary_tensors())
        {
            auto       tensor      = data.first;
            const auto aux_mem_req = data.second;
            tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment);
            tensor->allocator()->allocate(); // Use ACL allocated memory
        }
        // Construct user tensors
        TensorType t_input{};
        TensorType t_dst{};

        // Initialize user tensors
        t_input.allocator()->init(input_info);
        t_dst.allocator()->init(dst_info);

        // Allocate and fill user tensors
        t_input.allocator()->allocate();
        t_dst.allocator()->allocate();

        fill(AccessorType(t_input), 0);

        // Run the workload
        runtime.run({ &t_input, &t_dst });
        return t_dst;
    }

    SimpleTensor<T> compute_reference(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type)
    {
        // Create reference
        SimpleTensor<T> src(shape, data_type, 1, QuantizationInfo());
        // Fill reference
        fill(src, 0);
        return reference::pooling_layer<T>(src, pool_info, QuantizationInfo(), nullptr, DataLayout::NCHW);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

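/** Fixture for the standard Pool2d validation tests: builds a Pool2dAttributes object from the
 *  individual test parameters and forwards it to the generic fixture with mixed precision disabled.
 */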
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DynamicFusionGpuPool2dValidationFixture : public DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, PoolingType pool_type, Size2D pool_size, Padding2D pad, Size2D stride, bool exclude_padding, DataType data_type)
    {
        DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape,
                                                                                                          Pool2dAttributes().pool_type(pool_type).pool_size(pool_size).pad(pad).stride(stride).exclude_padding(exclude_padding),
                                                                                                          data_type, false);
    }
};

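/** Fixture for the Pool2d mixed-precision validation tests: identical to the fixture above, but also
 *  forwards the mixed_precision flag to exercise GpuPool2dSettings::mixed_precision.
 */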
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DynamicFusionGpuPool2dMixedPrecisionValidationFixture : public DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, PoolingType pool_type, Size2D pool_size, Padding2D pad, Size2D stride, bool exclude_padding, DataType data_type, bool mixed_precision)
    {
        DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape,
                                                                                                          Pool2dAttributes().pool_type(pool_type).pool_size(pool_size).pad(pad).stride(stride).exclude_padding(exclude_padding),
                                                                                                          data_type, mixed_precision);
    }
};

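/** Fixture taking a ready-made Pool2dAttributes object directly, used for special-case pooling configurations. */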
175template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
176class DynamicFusionGpuPool2dSpecialValidationFixture : public DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
177{
178public:
179 template <typename...>
180 void setup(TensorShape input_shape, Pool2dAttributes pool_attr, DataType data_type)
181 {
182 DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, pool_attr, data_type, false);
183 }
184};
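// Typical usage from a test suite. This is an illustrative sketch only: the concrete type aliases,
// datasets and tolerances are defined in the corresponding Pool2d test source file and may differ.
//
//   using DynamicFusionGpuPool2dFixture = DynamicFusionGpuPool2dValidationFixture<CLTensor, CLAccessor, GpuPool2d, float>;
//   FIXTURE_DATA_TEST_CASE(RunSmall, DynamicFusionGpuPool2dFixture, framework::DatasetMode::ALL, /* pooling dataset */)
//   {
//       validate(CLAccessor(_target), _reference, tolerance_f32);
//   }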

} // namespace validation
} // namespace test
} // namespace arm_compute

#endif /* TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE */