/*
 * Copyright (c) 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEMatMul.h"

#include "tests/NEON/Accessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"

#include "tests/datasets/LargeMatMulDataset.h"
#include "tests/datasets/SmallMatMulDataset.h"
#include "tests/validation/fixtures/MatMulFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
TEST_SUITE(NEON)
TEST_SUITE(MatMul)

constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f);    /**< Tolerance value for comparing reference's output against implementation's output for FP32 data types */
const AbsoluteTolerance<half>      tolerance_fp16(half(0.1f)); /**< Tolerance value for comparing reference's output against implementation's output for FP16 data types */

// clang-format off
// *INDENT-OFF*
// Validation Tests
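// Each zipped row below pairs the A/B/output TensorInfos with a constness flag and the
// expected validate() result; rows expected to fail are annotated inline with the reason.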
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),      // Mismatching datatype
                                             TensorInfo(TensorShape(9U, 6U), 1, DataType::S32),      // Unsupported datatypes
                                             TensorInfo(TensorShape(9U, 6U, 2U), 1, DataType::F32),  // Broadcasting in batch dimension not supported
                                             TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),      // Invalid shape for multiplication
                                             TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),
                                             TensorInfo(TensorShape(9U, 6U, 12U), 1, DataType::F32),
                                             TensorInfo(TensorShape(9U, 6U, 12U), 1, DataType::F32), // Tensors are not dynamic
    }),
    framework::dataset::make("InputBInfo", { TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8),
                                             TensorInfo(TensorShape(5U, 9U), 1, DataType::S32),
                                             TensorInfo(TensorShape(5U, 9U, 1U), 1, DataType::F32),
                                             TensorInfo(TensorShape(5U, 12U), 1, DataType::F32),
                                             TensorInfo(TensorShape(5U, 9U), 1, DataType::F32),
                                             TensorInfo(TensorShape(5U, 9U, 12U), 1, DataType::F32),
                                             TensorInfo(TensorShape(5U, 9U, 12U), 1, DataType::F32),
    })),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
                                             TensorInfo(TensorShape(5U, 6U), 1, DataType::S32),
                                             TensorInfo(TensorShape(5U, 6U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
                                             TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
                                             TensorInfo(TensorShape(5U, 6U, 12U), 1, DataType::F32),
                                             TensorInfo(TensorShape(5U, 6U, 12U), 1, DataType::F32),
    })),
    framework::dataset::make("TensorIsConst", { false, false, false, false, false, false, true })),
    framework::dataset::make("Expected", { false, false, false, false, true, true, false })),
    a_info, b_info, output_info, are_tensors_const, expected)
{
    TensorInfo a{a_info};
    TensorInfo b{b_info};
    a.set_are_values_constant(are_tensors_const);
    b.set_are_values_constant(are_tensors_const);
    Status status = NEMatMul::validate(&a,
                                       &b,
                                       &output_info,
                                       MatMulInfo(),
                                       CpuMatMulSettings());
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// *INDENT-ON*
// clang-format on

// Generic template
template <typename T>
using NEMatMulFixture = MatMulValidationWithActivationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;

// Fast math template
template <typename T>
using NEMatMulFastMathFixture = MatMulGenericValidationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;

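// Dynamic tensors template: re-runs the operator with non-constant (dynamic) input tensors;
// the number of repetitions comes from the "NumberOfRuns" dataset parameter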
template <typename T>
using NEMatMulDynamicTensorsFixture = MatMulValidationWithDynamicTensorsFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;

TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallMatMulDataset(),
                       framework::dataset::make("TransposeA", { false, true })),
                       framework::dataset::make("TransposeB", { false, true })),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeMatMulDataset(),
                       framework::dataset::make("TransposeA", { false, true })),
                       framework::dataset::make("TransposeB", { false, true })),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}
FIXTURE_DATA_TEST_CASE(RunHighDimensions, NEMatMulFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::HighDimensionalMatMulDataset(),
                       framework::dataset::make("TransposeA", { false, true })),
                       framework::dataset::make("TransposeB", { false, true })),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}

FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(),
                       framework::dataset::make("TransposeA", { false, true })),
                       framework::dataset::make("TransposeB", { false, true })),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })),
                       framework::dataset::make("NumberOfRuns", 5)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp32);
}
TEST_SUITE_END() // FP32

#ifdef ARM_COMPUTE_ENABLE_BF16
/* Note: MatMul BF16 is enabled by specifying FP32 as the data type and enabling the fast-math setting */
constexpr AbsoluteTolerance<float> tolerance_bf16(0.001f);
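// The BF16 run reuses the FP32 tolerance value, presumably because the fast-math
// kernels still accumulate in single precision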
TEST_SUITE(BF16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFastMathFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(),
                       framework::dataset::make("TransposeA", { false, true })),
                       framework::dataset::make("TransposeB", { false, true })),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })),
                       framework::dataset::make("RunTimes", { 0 })),
                       framework::dataset::make("Settings", { CpuMatMulSettings().fast_math(true) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_bf16);
}
TEST_SUITE_END() // BF16
#endif /* ARM_COMPUTE_ENABLE_BF16 */

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallMatMulDataset(),
                       framework::dataset::make("TransposeA", { false, true })),
                       framework::dataset::make("TransposeB", { false, true })),
                       framework::dataset::make("DataType", DataType::F16)),
                       framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeMatMulDataset(),
                       framework::dataset::make("TransposeA", { false, true })),
                       framework::dataset::make("TransposeB", { false, true })),
                       framework::dataset::make("DataType", DataType::F16)),
                       framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp16);
}
FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(),
                       framework::dataset::make("TransposeA", { false, true })),
                       framework::dataset::make("TransposeB", { false, true })),
                       framework::dataset::make("DataType", DataType::F16)),
                       framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })),
                       framework::dataset::make("NumberOfRuns", 5)))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_fp16);
}
TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

TEST_SUITE_END() // Float

TEST_SUITE_END() // MatMul
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute