/*
 * Copyright (c) 2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
#include "arm_compute/dynamic_fusion/sketch/OperatorAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
// Explicit includes for CL runtime types used directly in the tests below (CLScheduler, CLTensor);
// without these they would only be available transitively.
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

#include "tests/CL/CLAccessor.h"
#include "tests/framework/Macros.h"
#include "tests/validation/Validation.h"
#include "tests/validation/dynamic_fusion/Utils.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/Permute.h"

using namespace arm_compute::experimental::dynamic_fusion;
using namespace arm_compute::test::validation::utils;

namespace arm_compute
{
namespace test
{
namespace validation
{
TEST_SUITE(CL)
TEST_SUITE(INTEGRATION)
TEST_SUITE(DYNAMIC_FUSION)
TEST_CASE(Conv2d, framework::DatasetMode::ALL)
{
    /* Computation:
     * out = conv2d1x1(direct_conv)(input, weights, bias)
     */
    CLScheduler::get().default_reinit();

    const auto data_type      = DataType::F32;
    const auto data_layout    = DataLayout::NHWC;
    const auto t_input_shape  = TensorShape(384, 12, 12);
    const auto t_weight_shape = TensorShape(384, 1, 1, 16);
    const auto t_dst_shape    = TensorShape(16, 12, 12);
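    // Note: TensorShape lists dimensions innermost-first, so with NHWC these shapes read
    // { C, W, H [, N] }: a 1x1 convolution mapping 384 input channels to 16 over a 12x12 plane.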

    // Create a new workload sketch
    auto              cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
    auto              gpu_ctx        = GpuWorkloadContext{ &cl_compile_ctx };
    GpuWorkloadSketch sketch{ &gpu_ctx };

    // Fuse conv2d
    Conv2dAttributes conv2d_attr{};
    auto             input_info  = sketch.create_tensor_info(t_input_shape, 1, data_type, data_layout);
    auto             weight_info = sketch.create_tensor_info(TensorInfo(t_weight_shape, 1, data_type, data_layout));
    auto             ans_info    = sketch.create_tensor_info();
    GpuConv2d::create_op(sketch, &input_info, &weight_info, nullptr, &ans_info, conv2d_attr);

    auto dst_info = sketch.create_tensor_info();
    GpuOutput::create_op(sketch, &ans_info, &dst_info);

    // Configure runtime
    ClWorkloadRuntime runtime;
    runtime.configure(sketch);

    // (Important) Allocate auxiliary tensor memory if there are any
    // Instead of using ACL allocated memory, the user can choose to import memory into the tensors
    for(auto &data : runtime.get_auxiliary_tensors())
    {
        CLTensor     *tensor      = data.first;
        AuxMemoryInfo aux_mem_req = data.second;
        tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment);
        tensor->allocator()->allocate(); // Use ACL allocated memory
        // auto buf = cl::Buffer();
        // tensor->allocator()->import_memory(buf); // Or, import external memory
    }

    // Construct user tensors
    CLTensor t_input{};
    CLTensor t_weight{};
    CLTensor t_dst{};

    // Initialize user tensors
    t_input.allocator()->init(input_info);
    t_weight.allocator()->init(weight_info);
    t_dst.allocator()->init(dst_info);

    // Allocate and fill user tensors
    // Instead of using ACL allocator, the user can choose to import memory into the tensors
    t_input.allocator()->allocate();
    t_weight.allocator()->allocate();
    t_dst.allocator()->allocate();
    fill<float>(CLAccessor(t_input), 0, library.get());
    fill<float>(CLAccessor(t_weight), 1, library.get());

    // Run runtime
    runtime.run({ &t_input, &t_weight, &t_dst });

    // Create reference
    SimpleTensor<float> ref_t_input{ t_input_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
    SimpleTensor<float> ref_t_weight{ t_weight_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
    SimpleTensor<float> ref_t_bias_placeholder{ t_dst_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };

    // Fill reference
    fill<float>(ref_t_input, 0, library.get());
    fill<float>(ref_t_weight, 1, library.get());

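    // The reference convolution is computed on NCHW tensors, so permute the NHWC reference data into NCHW first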
    auto ref_t_input_nchw            = reference::permute(ref_t_input, PermutationVector(1U, 2U, 0U));
    auto ref_t_weight_nchw           = reference::permute(ref_t_weight, PermutationVector(1U, 2U, 0U));
    auto ref_t_bias_placeholder_nchw = reference::permute(ref_t_bias_placeholder, PermutationVector(1U, 2U, 0U));
    auto t_dst_shape_nchw            = t_dst_shape;
    permute(t_dst_shape_nchw, PermutationVector(1U, 2U, 0U));

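    // Translate the Conv2dAttributes into the legacy PadStrideInfo expected by the reference implementation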
    PadStrideInfo legacy_pad_stride(conv2d_attr.stride().x(), conv2d_attr.stride().y(), conv2d_attr.pad().left, conv2d_attr.pad().right, conv2d_attr.pad().top, conv2d_attr.pad().bottom,
                                    DimensionRoundingType{});
    auto       ref_t_dst_nchw = reference::convolution_layer(ref_t_input_nchw, ref_t_weight_nchw, ref_t_bias_placeholder_nchw, t_dst_shape_nchw, legacy_pad_stride, conv2d_attr.dilation());
    const auto ref_t_dst      = reference::permute(ref_t_dst_nchw, PermutationVector(2U, 0U, 1U));

    RelativeTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
    validate(CLAccessor(t_dst), ref_t_dst_nchw, tolerance_f32);
}
TEST_SUITE(Invalid_Fusion_Should_Fail)
TEST_CASE(Multiple_Complex_Ops_0, framework::DatasetMode::ALL)
{
    /* Computation:
     * out = conv2d(conv2d(l0_input, l0_weight), l1_weight)
     */
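    // Fusing a second conv2d on top of the first is an invalid fusion, so the second validate_op call below is expected to fail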
    CLScheduler::get().default_reinit();

    const auto data_type      = DataType::F32;
    const auto data_layout    = DataLayout::NHWC;
    const auto t_input_shape  = TensorShape(384, 12, 12);
    const auto t_weight_shape = TensorShape(384, 1, 1, 16);
    auto       t_input_info   = TensorInfo(t_input_shape, 1, data_type, data_layout);
    auto       t_weight_info  = TensorInfo(t_weight_shape, 1, data_type, data_layout);
    auto       t_dst_info     = TensorInfo();

    Conv2dAttributes conv2d_attr{};

    // Create a new workload sketch
    auto              cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
    auto              gpu_ctx        = GpuWorkloadContext{ &cl_compile_ctx };
    GpuWorkloadSketch sketch{ &gpu_ctx };

    // Create tensor infos
    auto input_info  = sketch.create_tensor_info(t_input_shape, 1, data_type, data_layout);
    auto weight_info = sketch.create_tensor_info(TensorInfo(t_weight_shape, 1, data_type, data_layout));
    auto dst_info    = sketch.create_tensor_info();

    // Fuse conv2d into the workload
    {
        // Validate operator
        const auto success = GpuConv2d::validate_op(sketch, &input_info, &weight_info, nullptr, &dst_info, conv2d_attr);
        ARM_COMPUTE_EXPECT(bool(success), framework::LogLevel::ERRORS);

        GpuConv2d::create_op(sketch, &input_info, &weight_info, nullptr, &dst_info, conv2d_attr);
    }

    // Create tensor infos
    auto weight_info_2 = sketch.create_tensor_info(t_weight_info);
    auto dst_info_2    = sketch.create_tensor_info();

    // Fuse conv2d into the workload
    {
        // Validate operator, should fail
        const auto success = GpuConv2d::validate_op(sketch, &dst_info, &weight_info_2, nullptr, &dst_info_2, conv2d_attr);
        ARM_COMPUTE_EXPECT(!bool(success), framework::LogLevel::ERRORS);
    }
}
TEST_SUITE_END() // Invalid_Fusion_Should_Fail
TEST_SUITE_END() // DYNAMIC_FUSION
TEST_SUITE_END() // INTEGRATION
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute