/*
 * Copyright (c) 2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifdef ENABLE_EXPERIMENTAL_DYNAMIC_FUSION
#include "arm_compute/core/TensorInfo.h"

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/experimental/ClWorkload.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/experimental/ClCompositeOperator.h"
#include "src/core/experimental/dynamic_fusion/WorkloadImpl/ClKernelDescriptors.h"
#include "src/gpu/cl/operators/ClAdd.h"
#include "src/gpu/cl/operators/ClConv2d.h"
#include "tests/CL/CLAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/validation/CL/UNIT/dynamic_fusion/Utils.h"
#include "tests/validation/Validation.h"

#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/ElementwiseOperations.h"
#include "tests/validation/reference/Permute.h"

#ifdef ARM_COMPUTE_ASSERTS_ENABLED
#include "tests/SimpleTensorPrinter.h"
#endif /* ARM_COMPUTE_ASSERTS_ENABLED */

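// Integration tests for the experimental dynamic fusion OperatorGraph / ClWorkload /
// ClCompositeOperator API: one fused conv2d + elementwise-add MoveNet subgraph validated
// against the reference implementation, plus unsupported and invalid graphs that the
// workload builder is expected to reject.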
using namespace arm_compute::experimental::dynamic_fusion;
using namespace arm_compute::test::validation::utils;

namespace arm_compute
{
namespace test
{
namespace validation
{
TEST_SUITE(CL)
TEST_SUITE(INTEGRATION)
TEST_SUITE(DYNAMIC_FUSION)
TEST_CASE(Operator_Fuse_Movenet_SubGraph_1_F32, framework::DatasetMode::ALL)
{
    // Please refer to: https://confluence.arm.com/pages/viewpage.action?pageId=886243697
    /* Computation:
     * out = add(addend, conv2d_1x1(input, weights, bias)), with conv2d forced to the DIRECT method
     */
    const auto data_type = DataType::F32;
    const auto data_layout = DataLayout::NHWC;
    const auto t_input_shape = TensorShape(384, 12, 12);
    // const auto t_weight_shape = TensorShape(384, 1, 1, 64);
    // const auto t_dst_shape = TensorShape(64, 12, 12);
    const auto t_weight_shape = TensorShape(384, 1, 1, 16);
    const auto t_dst_shape = TensorShape(16, 12, 12);
    auto t_input_info = TensorInfo(t_input_shape, 1, data_type, data_layout);
    auto t_weight_info = TensorInfo(t_weight_shape, 1, data_type, data_layout);
    auto t_l1_addend_info = TensorInfo(t_dst_shape, 1, data_type, data_layout);
    auto t_acc_info = TensorInfo(); // Intermediate tensor for cond3
    auto t_dst_info = TensorInfo();

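    // Operator descriptors: default conv2d attributes and an elementwise ADD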
    Conv2dDescriptor conv2d_desc{};
    ElementwiseDescriptor add_desc{ ArithmeticOperation::ADD };

    // Create reference
    SimpleTensor<float> ref_t_input{ t_input_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
    SimpleTensor<float> ref_t_weight{ t_weight_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
    SimpleTensor<float> ref_t_bias_placeholder{ t_dst_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
    SimpleTensor<float> ref_t_l1_addend{ t_dst_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };

    // Fill reference
    fill<float>(ref_t_input, 0, library.get());
    fill<float>(ref_t_weight, 1, library.get());
    fill<float>(ref_t_l1_addend, 2, library.get());

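    // The reference implementation computes in NCHW, so permute the NHWC inputs and the dst shape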
    auto ref_t_input_nchw = reference::permute(ref_t_input, PermutationVector(1U, 2U, 0U));
    auto ref_t_weight_nchw = reference::permute(ref_t_weight, PermutationVector(1U, 2U, 0U));
    auto ref_t_bias_placeholder_nchw = reference::permute(ref_t_bias_placeholder, PermutationVector(1U, 2U, 0U));
    auto ref_t_l1_addend_nchw = reference::permute(ref_t_l1_addend, PermutationVector(1U, 2U, 0U));
    auto t_dst_shape_nchw = t_dst_shape;
    permute(t_dst_shape_nchw, PermutationVector(1U, 2U, 0U));

    PadStrideInfo legacy_pad_stride(conv2d_desc.stride.x(), conv2d_desc.stride.y(), conv2d_desc.pad.left, conv2d_desc.pad.right, conv2d_desc.pad.top, conv2d_desc.pad.bottom, DimensionRoundingType{});
    auto ref_t_dst_nchw = reference::arithmetic_operation(
        ArithmeticOperation::ADD,
        ref_t_l1_addend_nchw,
        reference::convolution_layer(ref_t_input_nchw, ref_t_weight_nchw, ref_t_bias_placeholder_nchw, t_dst_shape_nchw, legacy_pad_stride, conv2d_desc.dilation),
        data_type,
        ConvertPolicy{});
    const auto ref_t_dst = reference::permute(ref_t_dst_nchw, PermutationVector(2U, 0U, 1U));

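    // Construct the operator graph: conv2d (forced to the DIRECT method) feeding an elementwise add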
    CLScheduler::get().default_reinit();
    const auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
    OperatorGraph op_graph;

    const auto op_t_input = add_tensor(op_graph, t_input_info);
    const auto op_t_weight = add_tensor(op_graph, t_weight_info);
    const auto op_t_l1_addend = add_tensor(op_graph, t_l1_addend_info);
    const auto op_t_acc = add_tensor(op_graph, t_acc_info); // temp accumulator; TensorInfo to be inferred
    const auto op_t_dst = add_tensor(op_graph, t_dst_info);

    auto conv2d = add_op_conv2d(op_graph, conv2d_desc, op_t_input, op_t_weight, op_t_acc);
    force_conv2d_method(op_graph, conv2d, ConvolutionMethod::DIRECT);
    add_op_elementwise_op(op_graph, add_desc, op_t_acc, op_t_l1_addend, op_t_dst);

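    // Compile the graph into a ClWorkload, then configure the runtime composite operator from it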
    const ClWorkloadContext workload_ctx{ GpuInfo{ CLScheduler::get().target() } };
    ClWorkload workload;
    build(workload, op_graph, workload_ctx);

    ClCompositeOperator op;
    op.configure(cl_compile_ctx, workload);

    // Construct tensors
    CLTensor t_input{};
    CLTensor t_weight{};
    CLTensor t_l1_addend{};
    CLTensor t_dst{};

    // Init tensors
    t_input.allocator()->init(t_input_info);
    t_weight.allocator()->init(t_weight_info);
    t_l1_addend.allocator()->init(t_dst_info);
    t_dst.allocator()->init(t_dst_info);

    // Allocate and fill tensors
    t_input.allocator()->allocate();
    t_weight.allocator()->allocate();
    t_l1_addend.allocator()->allocate();
    t_dst.allocator()->allocate();
    fill<float>(CLAccessor(t_input), 0, library.get());
    fill<float>(CLAccessor(t_weight), 1, library.get());
    fill<float>(CLAccessor(t_l1_addend), 2, library.get());
    // "Pack" tensors
    OpTensorBinding bp_tensors({ { op_t_input, &t_input },
                                 { op_t_weight, &t_weight },
                                 { op_t_l1_addend, &t_l1_addend },
                                 { op_t_dst, &t_dst }
                               });

    // Populate prepare and run pack-maps (including allocating aux tensors)
    ClAuxTensorData aux_tensor_data{};
    TensorPackMap prepare_pack_map{};
    TensorPackMap run_pack_map{};
    bind_tensors(aux_tensor_data, prepare_pack_map, run_pack_map, workload, bp_tensors);

    op.prepare(prepare_pack_map);
    op.run(run_pack_map);
    RelativeTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
    validate(CLAccessor(t_dst), ref_t_dst_nchw, tolerance_f32);
}
TEST_SUITE(Unsupported)
TEST_CASE(DataType_QASYMM8, framework::DatasetMode::ALL)
{
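    // Quantized types (QASYMM8) are not supported by the dynamic fusion builder; build() is expected to fail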
    const auto data_type = DataType::QASYMM8;
    const auto data_layout = DataLayout::NHWC;
    const auto t_input_shape = TensorShape(384, 12, 12);
    const auto t_weight_shape = TensorShape(384, 1, 1, 64);
    const auto t_dst_shape = TensorShape(64, 12, 12);
    auto t_input_info = TensorInfo(t_input_shape, 1, data_type, data_layout);
    auto t_weight_info = TensorInfo(t_weight_shape, 1, data_type, data_layout);
    auto t_l1_addend_info = TensorInfo(t_dst_shape, 1, data_type, data_layout);
    auto t_acc_info = TensorInfo(t_dst_shape, 1, data_type, data_layout);
    auto t_dst_info = TensorInfo(t_dst_shape, 1, data_type, data_layout);

    Conv2dDescriptor conv2d_desc{};
    ElementwiseDescriptor add_desc{};

    OperatorGraph op_graph;

    const auto op_t_input = add_tensor(op_graph, t_input_info);
    const auto op_t_weight = add_tensor(op_graph, t_weight_info);
    const auto op_t_l1_addend = add_tensor(op_graph, t_l1_addend_info);
    const auto op_t_acc = add_tensor(op_graph, t_acc_info); // temp accumulator; TensorInfo to be inferred
    const auto op_t_dst = add_tensor(op_graph, t_dst_info);

    auto conv2d = add_op_conv2d(op_graph, conv2d_desc, op_t_input, op_t_weight, op_t_acc);
    add_op_elementwise_op(op_graph, add_desc, op_t_acc, op_t_l1_addend, op_t_dst);
    force_conv2d_method(op_graph, conv2d, ConvolutionMethod::DIRECT);

    const ClWorkloadContext workload_ctx{ GpuInfo{ CLScheduler::get().target() } };
    ClWorkload workload;
    const auto success = build(workload, op_graph, workload_ctx);

    ARM_COMPUTE_EXPECT(!bool(success), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!bool(ClCompositeOperator::validate(workload)), framework::LogLevel::ERRORS);
}
TEST_CASE(DataLayout_NCHW, framework::DatasetMode::ALL)
{
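    // Only NHWC is supported; building a workload from NCHW tensors is expected to fail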
    const auto data_type = DataType::F32;
    const auto data_layout = DataLayout::NCHW;
    const auto t_input_shape = TensorShape(384, 12, 12);
    const auto t_weight_shape = TensorShape(384, 1, 1, 64);
    const auto t_dst_shape = TensorShape(64, 12, 12);
    auto t_input_info = TensorInfo(t_input_shape, 1, data_type, data_layout);
    auto t_weight_info = TensorInfo(t_weight_shape, 1, data_type, data_layout);
    auto t_dst_info = TensorInfo(t_dst_shape, 1, data_type, data_layout);

    Conv2dDescriptor conv2d_desc{};

    OperatorGraph op_graph;

    const auto op_t_input = add_tensor(op_graph, t_input_info);
    const auto op_t_weight = add_tensor(op_graph, t_weight_info);
    const auto op_t_dst = add_tensor(op_graph, t_dst_info);

    auto conv2d = add_op_conv2d(op_graph, conv2d_desc, op_t_input, op_t_weight, op_t_dst);
    force_conv2d_method(op_graph, conv2d, ConvolutionMethod::DIRECT);
    const ClWorkloadContext workload_ctx{ GpuInfo{ CLScheduler::get().target() } };
    ClWorkload workload;
    const auto success = build(workload, op_graph, workload_ctx);

    ARM_COMPUTE_EXPECT(!bool(success), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!bool(ClCompositeOperator::validate(workload)), framework::LogLevel::ERRORS);
}
TEST_SUITE_END() // Unsupported

TEST_SUITE(Invalid)
TEST_CASE(Multiple_Complex_Ops_0, framework::DatasetMode::ALL)
{
    /* Computation:
     * out = conv2d(conv2d(l0_input, l0_weight), l1_weight)
     */
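    // Fusing more than one "complex" operator (here, two conv2ds) into a single workload is invalid and must be rejected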
    const auto data_type = DataType::F32;
    const auto data_layout = DataLayout::NHWC;
    const auto t_l0_input_shape = TensorShape(1024, 56, 56);
    const auto t_l0_weight_shape = TensorShape(512, 1024, 1, 1);
    const auto t_l1_weight_shape = TensorShape(512, 256, 1, 1);

    auto t_l0_input_info = TensorInfo(t_l0_input_shape, 1, data_type, data_layout);
    auto t_l0_weight_info = TensorInfo(t_l0_weight_shape, 1, data_type, data_layout);
    auto t_l1_weight_info = TensorInfo(t_l1_weight_shape, 1, data_type, data_layout);
    auto t_l0_dst_info = TensorInfo();
    auto t_dst_info = TensorInfo();

    OperatorGraph op_graph;
    const auto conv2d_desc = Conv2dDescriptor{};

    const auto op_t_l0_input = add_tensor(op_graph, t_l0_input_info);
    const auto op_t_l0_weight = add_tensor(op_graph, t_l0_weight_info);
    const auto op_t_l1_weight = add_tensor(op_graph, t_l1_weight_info);
    const auto op_t_l0_dst = add_tensor(op_graph, t_l0_dst_info); // temp accumulator; TensorInfo to be inferred
    const auto op_t_dst = add_tensor(op_graph, t_dst_info);

    add_op_conv2d(op_graph, conv2d_desc, op_t_l0_input, op_t_l0_weight, op_t_l0_dst);
    add_op_conv2d(op_graph, conv2d_desc, op_t_l0_dst, op_t_l1_weight, op_t_dst);

    const ClWorkloadContext workload_ctx{ GpuInfo{ CLScheduler::get().target() } };
    ClWorkload workload;
    const auto success = build(workload, op_graph, workload_ctx);

    ARM_COMPUTE_EXPECT(!bool(success), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!bool(ClCompositeOperator::validate(workload)), framework::LogLevel::ERRORS);
}
TEST_CASE(Enlarging_Execution_Space, framework::DatasetMode::ALL)
{
    /* Computation:
     * out = add(l2_lhs, add(add(l0_lhs, l0_rhs), l1_rhs))
     */
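    // Each add broadcasts against a differently-shaped operand, so fusing the chain would keep
    // enlarging the execution space of the fused kernel; such graphs must be rejected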
    const auto data_type = DataType::F32;
    const auto data_layout = DataLayout::NHWC;
    const auto t_l0_lhs_shape = TensorShape(1, 256, 3);
    const auto t_l0_rhs_shape = TensorShape(1, 256, 3);
    const auto t_l1_rhs_shape = TensorShape(1, 1, 3);
    const auto t_l2_lhs_shape = TensorShape(1024, 1, 3);

    auto t_l0_lhs_info = TensorInfo(t_l0_lhs_shape, 1, data_type, data_layout);
    auto t_l0_rhs_info = TensorInfo(t_l0_rhs_shape, 1, data_type, data_layout);
    auto t_l1_rhs_info = TensorInfo(t_l1_rhs_shape, 1, data_type, data_layout);
    auto t_l2_lhs_info = TensorInfo(t_l2_lhs_shape, 1, data_type, data_layout);
    auto t_l0_dst_info = TensorInfo();
    auto t_l1_dst_info = TensorInfo();
    auto t_dst_info = TensorInfo();

    OperatorGraph op_graph;
    const auto add_desc = ElementwiseDescriptor{};

    const auto op_t_l0_lhs = add_tensor(op_graph, t_l0_lhs_info);
    const auto op_t_l0_rhs = add_tensor(op_graph, t_l0_rhs_info);
    const auto op_t_l1_rhs = add_tensor(op_graph, t_l1_rhs_info);
    const auto op_t_l2_lhs = add_tensor(op_graph, t_l2_lhs_info);
    const auto op_t_l0_dst = add_tensor(op_graph, t_l0_dst_info); // temp accumulator; TensorInfo to be inferred
    const auto op_t_l1_dst = add_tensor(op_graph, t_l1_dst_info); // temp accumulator; TensorInfo to be inferred
    const auto op_t_dst = add_tensor(op_graph, t_dst_info);

    add_op_elementwise_op(op_graph, add_desc, op_t_l0_lhs, op_t_l0_rhs, op_t_l0_dst);
    add_op_elementwise_op(op_graph, add_desc, op_t_l0_dst, op_t_l1_rhs, op_t_l1_dst);
    add_op_elementwise_op(op_graph, add_desc, op_t_l1_dst, op_t_l2_lhs, op_t_dst);

    const ClWorkloadContext workload_ctx{ GpuInfo{ CLScheduler::get().target() } };
    ClWorkload workload;
    const auto success = build(workload, op_graph, workload_ctx);

    ARM_COMPUTE_EXPECT(!bool(success), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!bool(ClCompositeOperator::validate(workload)), framework::LogLevel::ERRORS);
}
TEST_CASE(Root_Simple_And_Complex, framework::DatasetMode::ALL)
{
    /* Computation:
     * out = add(conv(l0_0_input, l0_0_weight), add(l0_1_lhs, l0_1_rhs))
     */
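    // A graph with two roots, one simple (add) and one complex (conv2d), cannot be fused into a
    // single workload and must be rejected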
    const auto data_type = DataType::F32;
    const auto data_layout = DataLayout::NHWC;

    const auto t_l0_0_input_shape = TensorShape(128, 21, 21);
    const auto t_l0_0_weight_shape = TensorShape(144, 128, 1, 1);
    const auto t_l0_1_lhs_shape = TensorShape(144, 21, 21);
    const auto t_l0_1_rhs_shape = TensorShape(1, 1, 21);

    auto t_l0_0_input_info = TensorInfo(t_l0_0_input_shape, 1, data_type, data_layout);
    auto t_l0_0_weight_info = TensorInfo(t_l0_0_weight_shape, 1, data_type, data_layout);
    auto t_l0_1_lhs_info = TensorInfo(t_l0_1_lhs_shape, 1, data_type, data_layout);
    auto t_l0_1_rhs_info = TensorInfo(t_l0_1_rhs_shape, 1, data_type, data_layout);
    auto t_l0_0_dst_info = TensorInfo();
    auto t_l0_1_dst_info = TensorInfo();
    auto t_dst_info = TensorInfo();

    OperatorGraph op_graph;
    const auto conv2d_desc = Conv2dDescriptor{};
    const auto add_desc = ElementwiseDescriptor{};

    const auto op_t_l0_0_input = add_tensor(op_graph, t_l0_0_input_info);
    const auto op_t_l0_0_weight = add_tensor(op_graph, t_l0_0_weight_info);
    const auto op_t_l0_1_lhs = add_tensor(op_graph, t_l0_1_lhs_info);
    const auto op_t_l0_1_rhs = add_tensor(op_graph, t_l0_1_rhs_info);
    const auto op_t_l0_0_dst = add_tensor(op_graph, t_l0_0_dst_info); // temp accumulator; TensorInfo to be inferred
    const auto op_t_l0_1_dst = add_tensor(op_graph, t_l0_1_dst_info); // temp accumulator; TensorInfo to be inferred
    const auto op_t_dst = add_tensor(op_graph, t_dst_info);

    add_op_conv2d(op_graph, conv2d_desc, op_t_l0_0_input, op_t_l0_0_weight, op_t_l0_0_dst);
    add_op_elementwise_op(op_graph, add_desc, op_t_l0_1_lhs, op_t_l0_1_rhs, op_t_l0_1_dst);
    add_op_elementwise_op(op_graph, add_desc, op_t_l0_0_dst, op_t_l0_1_dst, op_t_dst);

    const ClWorkloadContext workload_ctx{ GpuInfo{ CLScheduler::get().target() } };
    ClWorkload workload;
    const auto success = build(workload, op_graph, workload_ctx);

    ARM_COMPUTE_EXPECT(!bool(success), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!bool(ClCompositeOperator::validate(workload)), framework::LogLevel::ERRORS);
}
TEST_CASE(Loop, framework::DatasetMode::ALL)
{
    /* Computation:
     * tensor state0;
     * state1 = conv2d(l0_lhs, state0)
     * state0 = add(l1_lhs, state1)
     */
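    // The graph contains a cycle (state0 -> state1 -> state0); cyclic graphs are invalid and must be rejected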
    const auto data_type = DataType::F32;
    const auto data_layout = DataLayout::NHWC;

    const auto t_shape = TensorShape(13, 21);

    auto t_l0_lhs_info = TensorInfo(t_shape, 1, data_type, data_layout);
    auto t_l1_lhs_info = TensorInfo(t_shape, 1, data_type, data_layout);
    auto state0_info = TensorInfo(t_shape, 1, data_type, data_layout);
    auto state1_info = TensorInfo();

    OperatorGraph op_graph;
    const auto conv2d_desc = Conv2dDescriptor{};
    const auto add_desc = ElementwiseDescriptor{};

    const auto op_t_l0_lhs = add_tensor(op_graph, t_l0_lhs_info);
    const auto op_t_l1_lhs = add_tensor(op_graph, t_l1_lhs_info);
    const auto op_t_state0 = add_tensor(op_graph, state0_info);
    const auto op_t_state1 = add_tensor(op_graph, state1_info);

    add_op_conv2d(op_graph, conv2d_desc, op_t_l0_lhs, op_t_state0, op_t_state1);
    add_op_elementwise_op(op_graph, add_desc, op_t_l1_lhs, op_t_state1, op_t_state0);

    const ClWorkloadContext workload_ctx{ GpuInfo{ CLScheduler::get().target() } };
    ClWorkload workload;
    const auto success = build(workload, op_graph, workload_ctx);

    ARM_COMPUTE_EXPECT(!bool(success), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(!bool(ClCompositeOperator::validate(workload)), framework::LogLevel::ERRORS);
}
TEST_SUITE_END() // Invalid

TEST_SUITE_END() // DYNAMIC_FUSION
TEST_SUITE_END() // INTEGRATION
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */