/*
 * Copyright (c) 2022-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CLAMPFIXTURE
#define TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CLAMPFIXTURE

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"

#include "tests/framework/Fixture.h"
#include "tests/validation/reference/ActivationLayer.h"

using namespace arm_compute::experimental::dynamic_fusion;

namespace arm_compute
{
namespace test
{
namespace validation
{
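/** Validation fixture for the dynamic fusion Clamp operator.
 *
 * Builds a GPU workload sketch containing one Clamp operator (or two Clamp operators
 * chained back-to-back when fusion is requested), runs it through ClWorkloadRuntime,
 * and compares the result against the reference activation layer implementation.
 */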
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DynamicFusionClampValidationFixture : public framework::Fixture
{
public:
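    /** Set up the test case.
     *
     * @param[in] shape      Shape of the input and output tensors.
     * @param[in] attributes Clamp attributes holding the minimum and maximum bounds.
     * @param[in] fuse       If true, fuse a second Clamp operator onto the output of the first.
     * @param[in] data_type  Data type of the tensors.
     */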
    void setup(TensorShape shape, ClampAttributes attributes, bool fuse, DataType data_type)
    {
        // CLAMP is implemented as LU_BOUNDED_RELU with the alpha and beta variables swapped.
        ActivationLayerInfo act_info{ ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, attributes.max_val(), attributes.min_val() };

        _fuse       = fuse;
        _attributes = attributes;
        _data_type  = data_type;
        _target     = compute_target(shape, attributes);
        _reference  = compute_reference(shape, act_info);
    }

protected:
    std::vector<T> get_boundary_values(T min, T max)
    {
        // This function returns a vector filled with the following values, which
        // represent two partitions derived from equivalence partitioning:
        // * Lower partition: min, min + delta, lower quarter (nominal), center - delta
        // * Upper partition: center, center + delta, upper quarter (nominal), max - delta, max
        const auto delta         = is_data_type_float(_data_type) ? T(0.1f) : T(1);
        const auto center_value  = (min + max) / 2;
        const auto lower_quarter = (min + center_value) / 2;
        const auto upper_quarter = (center_value + max) / 2;

        std::vector<T> boundary_values{};

        // To ensure all the inserted values are within the given range after subtracting/adding delta
        auto insert_values = [&boundary_values, &min, &max](const std::initializer_list<T> &new_values)
        {
            for(auto &v : new_values)
            {
                if(v >= min && v <= max)
                {
                    boundary_values.emplace_back(v);
                }
            }
        };

        insert_values({ min, static_cast<T>(min + delta), static_cast<T>(lower_quarter), static_cast<T>(center_value - delta) });                               // lower partition
        insert_values({ static_cast<T>(center_value), static_cast<T>(center_value + delta), static_cast<T>(upper_quarter), static_cast<T>(max - delta), max }); // upper partition

        return boundary_values;
    }

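    // Fill the input tensor with static boundary values taken from the LU_BOUNDED_RELU test bounds.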
    template <typename U>
    void fill(U &&tensor)
    {
        float min_bound = 0;
        float max_bound = 0;
        std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, _data_type);
        library->fill_static_values(tensor, get_boundary_values(static_cast<T>(min_bound), static_cast<T>(max_bound)));
    }

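    // Build a GPU workload sketch containing the Clamp operator(s), configure a
    // ClWorkloadRuntime from it and execute it on user tensors.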
    TensorType compute_target(const TensorShape &shape, ClampAttributes attributes)
    {
        // Create a new workload sketch
        CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
        GpuWorkloadContext context{ &cl_compile_ctx };
        GpuWorkloadSketch  sketch{ &context };

        // Create sketch tensors
        TensorInfo src_info = context.create_tensor_info(TensorInfo(shape, 1, _data_type));
        TensorInfo dst_info = context.create_tensor_info(TensorInfo(shape, 1, _data_type));

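        // Add the first Clamp operator to the sketch; when fusion is requested, chain a second
        // Clamp operator onto its output before writing the final result through GpuOutput.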
        ITensorInfo *ans_0_info = FunctionType::create_op(sketch, &src_info, attributes);
        if(_fuse)
        {
            ITensorInfo *ans_1_info = FunctionType::create_op(sketch, ans_0_info, attributes);
            GpuOutput::create_op(sketch, ans_1_info, &dst_info);
        }
        else
        {
            GpuOutput::create_op(sketch, ans_0_info, &dst_info);
        }

        // Configure runtime
        ClWorkloadRuntime runtime;
        runtime.configure(sketch);

        // Construct user tensors
        TensorType t_src{};
        TensorType t_dst{};

        // Initialize user tensors
        t_src.allocator()->init(src_info);
        t_dst.allocator()->init(dst_info);

        // Allocate and fill user tensors
        t_src.allocator()->allocate();
        t_dst.allocator()->allocate();

        fill(AccessorType(t_src));

        // Run runtime
        runtime.run({ &t_src, &t_dst });

        return t_dst;
    }

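    // Compute the expected output using the plain activation layer reference implementation.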
    SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T> src{ shape, _data_type, 1, _quantization_info };

        // Fill reference
        fill(src);

        auto dst = reference::activation_layer<T>(src, act_info, _quantization_info);
        return dst;
    }

protected:
    QuantizationInfo _quantization_info{};
    ClampAttributes  _attributes{};
    bool             _fuse{ false };
    DataType         _data_type{};
    TensorType       _target{};
    SimpleTensor<T>  _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CLAMPFIXTURE */