/*
2 * Copyright (c) 2022 Arm Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE
25#define TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE
26
27#include "arm_compute/core/CL/CLKernelLibrary.h"
28#include "arm_compute/core/TensorInfo.h"
29#include "arm_compute/core/Types.h"
30#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
31#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
32#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
33
34#include "tests/framework/Fixture.h"
35#include "tests/validation/reference/DepthConvertLayer.h"
36
37using namespace arm_compute::experimental::dynamic_fusion;
38
39namespace arm_compute
40{
41namespace test
42{
43namespace validation
44{
45template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
46class DynamicFusionCastValidationFixture : public framework::Fixture
47{
48public:
49 template <typename...>
50 void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy)
51 {
52 _target = compute_target(shape, dt_in, dt_out, policy);
53 _reference = compute_reference(shape, dt_in, dt_out, policy);
54 }
55
56protected:
57 template <typename U>
58 void fill(U &&tensor, int i, DataType dt_in, DataType dt_out)
59 {
60 // Restricting range to avoid inf values
61 if(dt_out == DataType::F16)
62 {
63 constexpr int signed_min = -32000;
64 constexpr int signed_max = 32000;
65 constexpr int unsigned_min = 0;
66 constexpr int unsigned_max = 65000;
67
68 switch(dt_in)
69 {
70 case DataType::U8:
71 case DataType::QASYMM8:
72 case DataType::QASYMM8_SIGNED:
73 case DataType::S8:
74 case DataType::F32:
75 {
76 library->fill_tensor_uniform(tensor, i);
77 break;
78 }
79 case DataType::U16:
80 {
81 library->fill_tensor_uniform(tensor, i, static_cast<uint16_t>(unsigned_min), static_cast<uint16_t>(unsigned_max));
82 break;
83 }
84 case DataType::S16:
85 {
86 library->fill_tensor_uniform(tensor, i, static_cast<int16_t>(signed_min), static_cast<int16_t>(signed_max));
87 break;
88 }
89 case DataType::U32:
90 {
91 library->fill_tensor_uniform(tensor, i, static_cast<uint32_t>(unsigned_min), static_cast<uint32_t>(unsigned_max));
92 break;
93 }
94 case DataType::S32:
95 {
96 library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(signed_min), static_cast<int32_t>(signed_max));
97 break;
98 }
99 default:
100 ARM_COMPUTE_ERROR("NOT SUPPORTED!");
101 }
102 }
103 else
104 {
105 library->fill_tensor_uniform(tensor, i);
106 }
107 }
108
109 // Given input is in nchw format
110 TensorType compute_target(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy)
111 {
112 // Create a new workload sketch
113 auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
114 auto gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx };
115 GpuWorkloadSketch sketch{ &gpu_ctx };
116
117 // Create sketch tensors
118 TensorInfo src_info = sketch.create_tensor_info(TensorInfo(shape, 1, dt_in, DataLayout::NCHW)); // layout is not important
119 TensorInfo dst_info = sketch.create_tensor_info();
120
121 CastAttributes attributes;
122 attributes.convert_policy(policy).data_type(dt_out);
123
124 FunctionType::create_op(sketch, &src_info, &dst_info, attributes);
125
126 // Configure runtime
127 ClWorkloadRuntime runtime;
128 runtime.configure(sketch);
129
130 // (Important) Allocate auxiliary tensor memory if there are any
131 for(auto &data : runtime.get_auxiliary_tensors())
132 {
133 auto tensor = data.first;
134 const auto aux_mem_req = data.second;
135 tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment);
136 tensor->allocator()->allocate();
137 }
138
139 // Construct user tensors
140 TensorType t_src{};
141 TensorType t_dst{};
142
143 // Initialize user tensors
144 t_src.allocator()->init(src_info);
145 t_dst.allocator()->init(dst_info);
146
147 // Allocate and fill user tensors
148 t_src.allocator()->allocate();
149 t_dst.allocator()->allocate();
150
151 fill(AccessorType(t_src), 0, dt_in, dt_out);
152
153 // Run runtime
154 runtime.run({ &t_src, &t_dst });
155 return t_dst;
156 }
157
158 SimpleTensor<T2> compute_reference(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy)
159 {
160 // Create reference
161 SimpleTensor<T1> src{ shape, dt_in, 1 };
162
163 // Fill reference
164 fill(src, 0, dt_in, dt_out);
165
166 return reference::depth_convert<T1, T2>(src, dt_out, policy, 0);
167 }
168
169 TensorType _target{};
170 SimpleTensor<T2> _reference{};
171};
172} // namespace validation
173} // namespace test
174} // namespace arm_compute
175#endif /* TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE */