/*
 * Copyright (c) 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_ACTIVATIONFIXTURE
#define TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_ACTIVATIONFIXTURE

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"

#include "tests/framework/Fixture.h"
#include "tests/validation/reference/ActivationLayer.h"

using namespace arm_compute::experimental::dynamic_fusion;

namespace arm_compute
{
namespace test
{
namespace validation
{
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename... TArgs>
class DynamicFusionActivationValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, bool fuse, DataType data_type, ActivationLayerInfo act_info, TArgs... args)
    {
        _fuse      = fuse;
        _data_type = data_type;
        _function  = act_info.activation();
        _target    = compute_target(shape, args...);
        _reference = compute_reference(shape, act_info);
    }

protected:
    std::vector<T> get_boundary_values(T min, T max)
    {
        // This function returns a vector filled with the following values, which
        // represent two partitions derived from equivalence partitioning.
        // * Lower partition: min, min + delta, lower quarter (nominal), center - delta
        // * Upper partition: center, center + delta, upper quarter (nominal), max - delta, max
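        // For example, with a hypothetical float range of [-5, 5] (delta = 0.1, center = 0,
        // lower quarter = -2.5, upper quarter = 2.5), the generated values would be:
        // * Lower partition: -5, -4.9, -2.5, -0.1
        // * Upper partition: 0, 0.1, 2.5, 4.9, 5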
        const auto delta         = is_data_type_float(_data_type) ? T(0.1f) : T(1);
        const auto center_value  = (min + max) / 2;
        const auto lower_quarter = (min + center_value) / 2;
        const auto upper_quarter = (center_value + max) / 2;

        std::vector<T> boundary_values{};

        // To ensure all the inserted values are within the given range after subtracting/adding delta
        auto insert_values = [&boundary_values, &min, &max](const std::initializer_list<T> &new_values)
        {
            for(auto &v : new_values)
            {
                if(v >= min && v <= max)
                {
                    boundary_values.emplace_back(v);
                }
            }
        };

        insert_values({ min, static_cast<T>(min + delta), static_cast<T>(lower_quarter), static_cast<T>(center_value - delta) }); // lower partition
        insert_values({ static_cast<T>(center_value), static_cast<T>(center_value + delta), static_cast<T>(upper_quarter), static_cast<T>(max - delta), max }); // upper partition

        return boundary_values;
    }

    template <typename U>
    void fill(U &&tensor)
    {
        float min_bound = 0;
        float max_bound = 0;
        std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type);
        library->fill_static_values(tensor, get_boundary_values(static_cast<T>(min_bound), static_cast<T>(max_bound)));
    }

    TensorType compute_target(const TensorShape &shape, TArgs... args)
    {
        // Create a new workload sketch
        CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
        GpuWorkloadContext gpu_ctx{ &cl_compile_ctx };
        GpuWorkloadSketch sketch{ &gpu_ctx };

        // Create sketch tensors
        TensorInfo src_info = sketch.create_tensor_info(TensorInfo(shape, 1, _data_type));
        TensorInfo dst_info = sketch.create_tensor_info(TensorInfo(shape, 1, _data_type));

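        // Build the operator chain on the sketch: a single activation, or two back-to-back
        // activations when fusion is requested, followed by an output operator that writes
        // the result into dst_info.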
        ITensorInfo *ans_0_info = FunctionType::create_op(sketch, &src_info, args...);
        if(_fuse)
        {
            ITensorInfo *ans_1_info = FunctionType::create_op(sketch, ans_0_info, args...);
            GpuOutput::create_op(sketch, ans_1_info, &dst_info);
        }
        else
        {
            GpuOutput::create_op(sketch, ans_0_info, &dst_info);
        }

        // Configure runtime
        ClWorkloadRuntime runtime;
        runtime.configure(sketch);

        // Construct user tensors
        TensorType t_src{};
        TensorType t_dst{};

        // Initialize user tensors
        t_src.allocator()->init(src_info);
        t_dst.allocator()->init(dst_info);

        // Allocate and fill user tensors
        t_src.allocator()->allocate();
        t_dst.allocator()->allocate();

        fill(AccessorType(t_src));

        // Run runtime
        runtime.run({ &t_src, &t_dst });

        return t_dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T> src{ shape, _data_type, 1 };

        // Fill reference
        fill(src);

        auto tmp = reference::activation_layer<T>(src, act_info);

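        // Mirror the fused target: when fusion is requested, the reference applies the same activation a second time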
        if(_fuse)
        {
            auto dst = reference::activation_layer<T>(tmp, act_info);
            return dst;
        }
        else
        {
            return tmp;
        }
    }

protected:
    ActivationLayerInfo::ActivationFunction _function{};
    bool                                    _fuse{ false };
    DataType                                _data_type{};
    TensorType                              _target{};
    SimpleTensor<T>                         _reference{};
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DynamicFusionSigmoidValidationFixture : public DynamicFusionActivationValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape shape, bool fuse, DataType data_type)
    {
        ActivationLayerInfo act_info{ ActivationLayerInfo::ActivationFunction::LOGISTIC };
        DynamicFusionActivationValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, fuse, data_type, act_info);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DynamicFusionTanhValidationFixture : public DynamicFusionActivationValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape shape, bool fuse, DataType data_type)
    {
        ActivationLayerInfo act_info{ ActivationLayerInfo::ActivationFunction::TANH };
        DynamicFusionActivationValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, fuse, data_type, act_info);
    }
};

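// A minimal usage sketch for context (illustration only): the concrete operator class, dataset
// helpers and tolerance named below are assumptions and are not provided by this header. A CL
// validation test would typically alias one of these fixtures and register it with the
// framework's FIXTURE_DATA_TEST_CASE macro, e.g.:
//
//   using DynamicFusionCLSigmoidFixture =
//       DynamicFusionSigmoidValidationFixture<CLTensor, CLAccessor, GpuSigmoid, float>;
//
//   FIXTURE_DATA_TEST_CASE(RunSmall, DynamicFusionCLSigmoidFixture, framework::DatasetMode::ALL,
//                          combine(combine(datasets::SmallShapes(),
//                                          framework::dataset::make("Fuse", { false, true })),
//                                  framework::dataset::make("DataType", DataType::F32)))
//   {
//       validate(CLAccessor(_target), _reference, tolerance_f32);
//   }
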
} // namespace validation
} // namespace test
} // namespace arm_compute

#endif /* TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_ACTIVATIONFIXTURE */