/*
 * Copyright (c) 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ACL_TESTS_VALIDATION_FIXTURES_MATMULFIXTURE_H
#define ACL_TESTS_VALIDATION_FIXTURES_MATMULFIXTURE_H

#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "src/core/utils/quantization/AsymmHelpers.h"
#include "tests/framework/Asserts.h" // Required for ARM_COMPUTE_ASSERT
#include "tests/framework/Fixture.h"
#include "tests/validation/Validation.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/GEMM.h"
#include "tests/validation/reference/GEMMLowp.h"
#include "tests/validation/reference/Permute.h"
#include "tests/validation/reference/ReshapeLayer.h"
#include <limits>
#include <random>
#include <type_traits>

namespace arm_compute
{
namespace test
{
namespace validation
{
template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
class MatMulGenericValidationFixture : public framework::Fixture
{
public:
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs,
               Settings settings, QuantizationInfo a_qinfo = QuantizationInfo(), QuantizationInfo b_qinfo = QuantizationInfo(), QuantizationInfo o_qinfo = QuantizationInfo())
    {
        // The input shapes are assumed to be non-transposed for both the a and b matrices; they are permuted here when the transpose flags are set.
        if(transpose_a)
        {
            permute(shape_a, PermutationVector(1U, 0U));
        }
        if(transpose_b)
        {
            permute(shape_b, PermutationVector(1U, 0U));
        }

        _target    = compute_target(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, settings, a_qinfo, b_qinfo, o_qinfo);
        _reference = compute_reference(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, a_qinfo, b_qinfo, o_qinfo);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float lo = -1.f, float hi = 1.f)
    {
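        // Floating-point tensors are filled from the caller-provided [lo, hi] range below;
        // quantized tensors are instead filled across the full value range of their data type.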
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(lo), float(hi) };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(lo, hi);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QASYMM8:
            case DataType::QASYMM8_SIGNED:
            {
                library->fill_tensor_uniform(tensor, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Unsupported data type.");
            }
        }
    }

    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool transpose_a, bool transpose_b, DataType data_type,
                              ActivationLayerInfo act_info, int num_extra_runs, const Settings &settings, QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo)
    {
        // 1. Create classes and configure function
        // ----------------------------------------------------
        // Create tensors and configure the relevant classes and matmul function
        TensorType a   = create_tensor<TensorType>(shape_a, data_type, 1, a_qinfo);
        TensorType b   = create_tensor<TensorType>(shape_b, data_type, 1, b_qinfo);
        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, o_qinfo);

        FunctionType matmul;

        // Configure MatMulInfo class
        MatMulInfo mm_info;
        mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b);
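        // adj_lhs/adj_rhs flag the corresponding operand as transposed, matching the shapes
        // that were pre-permuted in setup() when transpose_a/transpose_b are set.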

        // Ensure values are dynamic
        a.info()->set_are_values_constant(false);
        b.info()->set_are_values_constant(false);

        // Configure operator
        matmul.configure(&a, &b, &dst, mm_info, settings, act_info);

        // Assertions
        ARM_COMPUTE_ASSERT(a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(b.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        // Allocate tensors
        a.allocator()->allocate();
        b.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // For multiple runs.
        for(int i = 0; i < num_extra_runs; i++)
        {
            // Stress dynamic tensors by running multiple times.
            // --------------------------------------------------------
            // Fill tensors with a fresh seed on every run, then run the function.
            // Note: the seed depends on the loop index so that each extra run sees new data.
            const int seed_offset = (i + 1) * 100;
            fill(AccessorType(a), seed_offset);
            fill(AccessorType(b), seed_offset + 1);

            matmul.run();
        }

        // 2. Final run for reference comparison
        // --------------------------------------------------------
        // Re-fill tensors with the same seeds as the reference run
        // Compute MatMul operation
        fill(AccessorType(a), 2);
        fill(AccessorType(b), 3);

        matmul.run();

        return dst;
    }

    template <typename TT>
    typename std::enable_if < !std::is_integral<TT>::value, SimpleTensor<TT >>::type
    compute_reference_gemm(const SimpleTensor<TT> &a, const SimpleTensor<TT> &b, const SimpleTensor<TT> &c, float alpha, float beta, const QuantizationInfo &o_qinfo)
    {
        ARM_COMPUTE_UNUSED(o_qinfo);

        return reference::gemm(a, b, c, alpha, beta);
    }

    template <typename TT>
    typename std::enable_if<std::is_integral<TT>::value, SimpleTensor<TT>>::type
    compute_reference_gemm(const SimpleTensor<TT> &a, const SimpleTensor<TT> &b, const SimpleTensor<TT> &c, float alpha, float beta, const QuantizationInfo &o_qinfo)
    {
        ARM_COMPUTE_UNUSED(alpha, beta);

        const auto aq = a.quantization_info().uniform();
        const auto bq = b.quantization_info().uniform();
        const auto oq = o_qinfo.uniform();

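        // The int32 accumulator of the integer matmul carries an effective scale of
        // aq.scale * bq.scale; requantizing to the output therefore multiplies by
        // (aq.scale * bq.scale) / oq.scale. calculate_quantized_multiplier() approximates
        // this real multiplier as a fixed-point integer multiplier plus a power-of-two shift.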
        const auto multiplier = aq.scale * bq.scale / oq.scale;

        int32_t output_multiplier = 0;
        int32_t output_shift      = 0;
        quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
        std::vector<int32_t> output_multipliers{ output_multiplier };
        std::vector<int32_t> output_shifts{ output_shift };

        // The lhs and rhs offsets are negated here to keep the reference aligned with the function implementation, where the lhs and rhs offsets are also negated.
        const auto tmp = reference::gemmlowp_matrix_multiply_core<int32_t>(
                             a, b, c.shape(), -aq.offset, -bq.offset);

        auto output = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, TT>(
                          tmp, output_multipliers, output_shifts, oq.offset,
                          std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
        output.quantization_info(o_qinfo);

        return output;
    }

    SimpleTensor<T> compute_reference(const TensorShape &a_shape, const TensorShape &b_shape, const TensorShape &output_shape, bool transpose_a, bool transpose_b, DataType data_type,
                                      ActivationLayerInfo act_info, QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo)
    {
        // We collapse dimensions > 2 onto dimension 2, i.e. 4D+ tensors will look like 3D
        // This is necessary unless we choose to extend gemm reference for 4D+ tensors
        TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimZ);
        TensorShape a_shape_collapsed      = a_shape.collapsed_from(Window::DimZ);
        TensorShape b_shape_collapsed      = b_shape.collapsed_from(Window::DimZ);
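        // e.g. a 4D output of shape (N, M, B0, B1) collapses to (N, M, B0 * B1), so the
        // reference gemm only ever sees a single, possibly merged, batch dimension.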

        // Create reference
        SimpleTensor<T> a{ a_shape_collapsed, data_type, 1, a_qinfo };
        SimpleTensor<T> b{ b_shape_collapsed, data_type, 1, b_qinfo };
        SimpleTensor<T> c{ output_shape_collapsed, data_type, 1 };

        // Fill reference
        fill(a, 2);
        fill(b, 3);

        /* Note: Assuming the usual batch matmul dimensions A = (B x M x K) and B = (B x K x N): if transpose_a is true, then A is assumed to be (B x K x M)
           and must be pre-transposed before being passed to the fixture; the fixture then transposes A again to recover (B x M x K),
           which is the layout expected by the reference implementation.
           Similarly, if transpose_b is true, then B is assumed to be (B x N x K) and must be pre-transposed before being passed to the fixture. */
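        // Example: with B = 2, M = 3, K = 5 and transpose_a = true, A is supplied to the
        // fixture as (2 x 5 x 3) and the permute below restores (2 x 3 x 5) = (B x M x K).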

        // Define transposed shapes
        TensorShape a_transposed_shape(a.shape());
        a_transposed_shape.set(0, a.shape().y());
        a_transposed_shape.set(1, a.shape().x());

        TensorShape b_transposed_shape(b.shape());
        b_transposed_shape.set(0, b.shape().y());
        b_transposed_shape.set(1, b.shape().x());

        // Define transposed tensors
        SimpleTensor<T> a_transposed{ a_transposed_shape, data_type };
        SimpleTensor<T> b_transposed{ b_transposed_shape, data_type };

        // Pre-transpose a if necessary
        if(transpose_a)
        {
            a_transposed = reference::permute<T>(a, PermutationVector(1U, 0U));
        }
        // Pre-transpose b if necessary
        if(transpose_b)
        {
            b_transposed = reference::permute<T>(b, PermutationVector(1U, 0U));
        }

        // Setting beta to 0 will effectively disable C for the
        // computation of the reference: alpha * A * B + 0 * C
        // Use the transposed tensors when the corresponding transpose flag is enabled, else the original tensors
        auto result = compute_reference_gemm<T>((transpose_a) ? a_transposed : a, (transpose_b) ? b_transposed : b, c, 1.0f, 0.f, o_qinfo);

        result = reference::activation_layer<T>(result, act_info, o_qinfo);

        // We reshape the gemm output back if the tensor is high dimensional
        if(output_shape_collapsed != output_shape)
        {
            result = reference::reshape_layer(result, output_shape);
        }

        return result;
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
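
// The derived fixtures below bind subsets of the generic parameters for specific test cases.
// A minimal usage sketch, assuming the CPU backend types from NEMatMul.h (the concrete
// tensor, accessor, function and settings types vary per backend and test file, and the
// dataset/tolerance arguments are placeholders):
//
//   template <typename T>
//   using NEMatMulFixture = MatMulValidationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
//
//   FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture<float>, framework::DatasetMode::PRECOMMIT, /* dataset */)
//   {
//       validate(Accessor(_target), _reference, /* tolerance */);
//   }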

template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
class MatMulValidationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type)
    {
        MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, ActivationLayerInfo(), 0,
                                                                                                   Settings());
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
class MatMulValidationWithDynamicTensorsFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs)
    {
        MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings());
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
class QuantizedMatMulValidationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs,
               QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo)
    {
        MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(),
                                                                                                   a_qinfo, b_qinfo, o_qinfo);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
class MatMulValidationWithActivationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info)
    {
        MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings());
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
class MatMulValidationWithActivationAlphaBetaFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo::ActivationFunction function,
               float alpha_beta)
    {
        ActivationLayerInfo act_info(function, alpha_beta, alpha_beta);
        MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings());
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
class QuantizedMatMulValidationWithActivationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
{
public:
    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo::ActivationFunction function,
               float alpha_beta, int num_extra_runs,
               QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo)
    {
        ActivationLayerInfo act_info(function, alpha_beta, alpha_beta);
        MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(),
                                                                                                   a_qinfo, b_qinfo, o_qinfo);
    }
};

} // namespace validation
} // namespace test
} // namespace arm_compute
#endif // ACL_TESTS_VALIDATION_FIXTURES_MATMULFIXTURE_H