/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ConcatenateLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
/** Zero padding test: configure a concatenation of two tensors along the batch axis and check that no padding is added to any tensor */
bool validate_zero_padding(unsigned int width, unsigned int height, unsigned int channels, unsigned int batches, DataType data_type)
{
    TensorShape src_shape(width, height, channels, batches);
    TensorShape dst_shape(width, height, channels, batches * 2);

    // Create tensors
    CLTensor src0 = create_tensor<CLTensor>(src_shape, data_type);
    CLTensor src1 = create_tensor<CLTensor>(src_shape, data_type);
    CLTensor dst  = create_tensor<CLTensor>(dst_shape, data_type);

    src0.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
    src1.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
    dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));

    ARM_COMPUTE_EXPECT(src0.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    std::vector<const ICLTensor *> srcs = { &src0, &src1 };

    // Create and configure function
    CLConcatenateLayer concat;
    concat.configure(srcs, &dst, 3U);

    // Configuration must not have added padding to the inputs or the output
    return src0.info()->padding().empty() && src1.info()->padding().empty() && dst.info()->padding().empty();
}
} // namespace
TEST_SUITE(CL)
TEST_SUITE(BatchConcatenateLayer)

// *INDENT-OFF*
// clang-format off
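// Exercise CLConcatenateLayer::validate() with input/output infos that mismatch in data type or in one
// dimension; only the last configuration is expected to be reported as valid.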
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
    framework::dataset::make("InputInfo1", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32), // Mismatching data type input/output
                                             TensorInfo(TensorShape(20U, 27U, 4U, 4U), 1, DataType::F32), // Mismatching x dimension
                                             TensorInfo(TensorShape(23U, 26U, 4U, 3U), 1, DataType::F32), // Mismatching y dimension
                                             TensorInfo(TensorShape(23U, 27U, 4U, 3U), 1, DataType::F32), // Mismatching z dimension
                                             TensorInfo(TensorShape(16U, 27U, 3U, 6U), 1, DataType::F32)
                                           }),
    framework::dataset::make("InputInfo2", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),
                                             TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
                                             TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
                                             TensorInfo(TensorShape(23U, 27U, 3U, 3U), 1, DataType::F32),
                                             TensorInfo(TensorShape(16U, 27U, 3U, 6U), 1, DataType::F32)
                                           })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F16),
                                             TensorInfo(TensorShape(23U, 12U, 4U, 4U), 1, DataType::F32),
                                             TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
                                             TensorInfo(TensorShape(23U, 20U, 4U, 3U), 1, DataType::F32),
                                             TensorInfo(TensorShape(16U, 27U, 3U, 12U), 1, DataType::F32)
                                           })),
    framework::dataset::make("Expected", { false, false, false, false, true })),
    input_info1, input_info2, output_info, expected)
{
    std::vector<TensorInfo> inputs_vector_info;
    inputs_vector_info.emplace_back(std::move(input_info1));
    inputs_vector_info.emplace_back(std::move(input_info2));

    std::vector<const ITensorInfo *> inputs_vector_info_raw;
    inputs_vector_info_raw.reserve(inputs_vector_info.size());
    for(auto &input : inputs_vector_info)
    {
        inputs_vector_info_raw.emplace_back(&input);
    }

    bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), 3));
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}

/** Validate zero padding tests
 *
 * A series of validation tests to check that no padding is added as part of configuration for 5 different scenarios.
 *
 * Checks performed in order:
 * - First dimension multiple of 16
 * - First dimension non-multiple of 16
 * - First dimension less than 16 (vec_size for qasymm8) but multiple
 * - First dimension less than 16 (vec_size for qasymm8) non-multiple
 * - Tensor with only one element
 */
DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
    framework::dataset::make("Width", { 32U, 37U, 12U, 13U, 1U }),
    framework::dataset::make("DataType", { DataType::F32, DataType::QASYMM8 })),
    width, data_type)
{
    const bool one_elem = (width == 1U);
    bool status = validate_zero_padding(width, one_elem ? 1U : 17U, one_elem ? 1U : 7U, one_elem ? 1U : 2U, data_type);
    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

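// Fixture that runs CLConcatenateLayer along the batch axis (axis 3) on the given shape sets and
// compares the output against the reference implementation.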
template <typename T>
using CLBatchConcatenateLayerFixture = ConcatenateLayerValidationFixture<CLTensor, ICLTensor, CLAccessor, CLConcatenateLayer, T>;

TEST_SUITE(Float)
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchConcatenateLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
                                       framework::dataset::make("DataType", DataType::F16)),
                               framework::dataset::make("Axis", 3)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchConcatenateLayerFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(concat(datasets::Large3DShapes(), datasets::Small4DShapes()),
                                       framework::dataset::make("DataType", DataType::F16)),
                               framework::dataset::make("Axis", 3)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // FP16

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
                                       framework::dataset::make("DataType", DataType::F32)),
                               framework::dataset::make("Axis", 3)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(datasets::ConcatenateLayerShapes(),
                                       framework::dataset::make("DataType", DataType::F32)),
                               framework::dataset::make("Axis", 3)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

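// The quantized tests reuse the same fixture with QASYMM8 tensors.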
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchConcatenateLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
                                       framework::dataset::make("DataType", DataType::QASYMM8)),
                               framework::dataset::make("Axis", 3)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchConcatenateLayerFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
                       combine(combine(datasets::ConcatenateLayerShapes(),
                                       framework::dataset::make("DataType", DataType::QASYMM8)),
                               framework::dataset::make("Axis", 3)))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE_END() // Quantized

TEST_SUITE_END() // BatchConcatenateLayer
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute