/*
 * Copyright (c) 2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/QLSTMLayerNormalizationFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<int16_t> tolerance_s16(0); /**< Tolerance value for comparing reference's output against implementation's output for QSYMM16 data types */
constexpr uint32_t                   vector_size_byte = 16;

using test::datasets::ShapeDataset;
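/** Shape dataset covering the vector-size boundaries exercised by the OpenCL kernel.
 *
 * Produces shapes whose first dimension is one element below, exactly at, and one element
 * above num_elements_per_iter * num_iteration, each with num_batches as the second dimension.
 */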
template <uint32_t num_elements_per_iter, uint32_t num_batches, uint32_t num_iteration>
class QLSTMLayerNormShapeDataSet : public ShapeDataset
{
    static constexpr auto boundary_minus_one = num_elements_per_iter * num_iteration - 1;
    static constexpr auto boundary           = num_elements_per_iter * num_iteration;
    static constexpr auto boundary_plus_one  = num_elements_per_iter * num_iteration + 1;

public:
    QLSTMLayerNormShapeDataSet(std::string name)
        : ShapeDataset(name,
    {
        TensorShape{ boundary_minus_one, num_batches },
        TensorShape{ boundary, num_batches },
        TensorShape{ boundary_plus_one, num_batches }
    })
    {
    }
};

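/** Specialization for zero iterations: shapes with fewer elements than a single vector iteration. */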
template <uint32_t num_elements_per_iter, uint32_t num_batches>
class QLSTMLayerNormShapeDataSet<num_elements_per_iter, num_batches, 0> : public ShapeDataset
{
public:
    QLSTMLayerNormShapeDataSet(std::string name)
        : ShapeDataset(name,
    {
        TensorShape{ 1, num_batches },
        TensorShape{ 2, num_batches }
    })
    {
    }
};
} // namespace
TEST_SUITE(CL)
TEST_SUITE(QLSTMLayerNormalization)

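// Baseline configuration accepted by the kernel; each entry of the Validate test below deviates from it in one property.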
static const TensorShape correct_input_shape{ TensorShape(15U, 2U) };
static const TensorShape correct_weight_shape{ TensorShape(15U) };
static const TensorShape correct_bias_shape{ TensorShape(15U) };
static const DataType    correct_input_dt{ DataType::QSYMM16 };
static const DataType    correct_weight_dt{ DataType::QSYMM16 };
static const DataType    correct_bias_dt{ DataType::S32 };
static const uint32_t    tensor_num_channel{ 1 };

// *INDENT-OFF*
// clang-format off

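// Every configuration below is intentionally invalid (see the inline comments on the InputInfo entries),
// so validate() is expected to return an error status for each of them.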
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL,
    zip(zip(
            framework::dataset::make("InputInfo", {
                TensorInfo(correct_input_shape, tensor_num_channel, DataType::F16),         // input supports only QSYMM16
                TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),      // weight supports only QSYMM16
                TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),      // bias supports only S32
                TensorInfo(TensorShape(15U, 2U, 2U), tensor_num_channel, correct_input_dt), // input supports only up to 2D
                TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),      // weight supports only up to 1D
                TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),      // bias supports only up to 1D
                TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),      // input_shape[0] != weight_shape[0] should fail
                TensorInfo(correct_input_shape, tensor_num_channel, correct_input_dt),      // weight_shape[0] != bias_shape[0] should fail
            }),
            framework::dataset::make("WeightInfo", {
                TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
                TensorInfo(correct_weight_shape, tensor_num_channel, DataType::F16),
                TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
                TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
                TensorInfo(TensorShape(15U, 2U), tensor_num_channel, correct_weight_dt),
                TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
                TensorInfo(TensorShape(14U), tensor_num_channel, correct_weight_dt),
                TensorInfo(correct_weight_shape, tensor_num_channel, correct_weight_dt),
            })
        ),
        framework::dataset::make("BiasInfo", {
            TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
            TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
            TensorInfo(correct_bias_shape, tensor_num_channel, DataType::QSYMM16),
            TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
            TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
            TensorInfo(TensorShape(15U, 2U), tensor_num_channel, correct_bias_dt),
            TensorInfo(correct_bias_shape, tensor_num_channel, correct_bias_dt),
            TensorInfo(TensorShape(14U), tensor_num_channel, correct_bias_dt),
        })
    ), input_info, weight_info, bias_info)
{
    TensorInfo   dummy_output{};
    const Status s = CLQLSTMLayerNormalizationKernel::validate(&input_info, &dummy_output, &weight_info, &bias_info);
    ARM_COMPUTE_EXPECT(!bool(s), framework::LogLevel::ERRORS);
}

// clang-format on
// *INDENT-ON*

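// Bind the generic QLSTM layer-normalization validation fixture to the OpenCL tensor, accessor and kernel types.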
template <typename T>
using CLQLSTMLayerNormalizationFixture = CLQLSTMLayerNormalizationValidationFixture<CLTensor, CLAccessor, CLQLSTMLayerNormalizationKernel, T>;

TEST_SUITE(Quantized)
TEST_SUITE(QSYMM16)

/** Tests will be targeting
 * - Comparison between the OpenCL kernel and a scalar reference kernel implementing the exact same algorithm
 * - 1D and 2D input shapes whose first dimension covers boundary values of the 128-bit vector size (0~3 iterations)
 * - 1D weight and bias shapes with the same size as the first dimension of the input shapes
 * - Quantization scales both greater than and smaller than one
 * - Input values as noted in the fixture
 *
 * What we can't test
 * - Since the reference kernel uses the exact same algorithm in the same quantized domain,
 *   it is hard to fully verify that the algorithm accomplishes what it is supposed to.
 * - The algorithm is sensitive to the quantization scale, but that sensitivity is hard to fully test
 *   for the reason above.
 * - Likewise, corner values are hard to fully test because the reference kernel and the OpenCL
 *   kernel share the same algorithm.
 */

constexpr uint32_t qsymm16_per_vector = vector_size_byte / sizeof(int16_t);

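// Builds a dataset of input/weight/bias shapes around the given number of vector iterations,
// combined with the QSYMM16 data type and quantization scales both smaller and greater than one.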
#define QSYMM16_DATASET_ITER(num_input_batch, num_iter)                                                               \
    combine(combine(zip(zip(QLSTMLayerNormShapeDataSet<qsymm16_per_vector, num_input_batch, num_iter>("InputShape"), \
                            QLSTMLayerNormShapeDataSet<qsymm16_per_vector, 1, num_iter>("WeightShape")),             \
                        QLSTMLayerNormShapeDataSet<qsymm16_per_vector, 1, num_iter>("BiasShape")),                   \
                    framework::dataset::make("DataType", DataType::QSYMM16)),                                        \
            framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1. / 8192), QuantizationInfo(8192) }))

#define QSYMM16_DATASET_1D \
    concat(concat(QSYMM16_DATASET_ITER(1, 0), QSYMM16_DATASET_ITER(1, 1)), QSYMM16_DATASET_ITER(1, 2))

#define QSYMM16_DATASET_2D \
    concat(concat(QSYMM16_DATASET_ITER(3, 0), QSYMM16_DATASET_ITER(3, 1)), QSYMM16_DATASET_ITER(3, 2))

FIXTURE_DATA_TEST_CASE(RandomValue1D, CLQLSTMLayerNormalizationFixture<int16_t>, framework::DatasetMode::ALL, QSYMM16_DATASET_1D)
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_s16);
}

FIXTURE_DATA_TEST_CASE(RandomValue2D, CLQLSTMLayerNormalizationFixture<int16_t>, framework::DatasetMode::ALL, QSYMM16_DATASET_2D)
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_s16);
}

#undef QSYMM16_DATASET_ITER
#undef QSYMM16_DATASET_2D
#undef QSYMM16_DATASET_1D

TEST_SUITE_END() // QSYMM16
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // QLSTMLayerNormalization
TEST_SUITE_END() // CL

} // namespace validation
} // namespace test
} // namespace arm_compute