/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/LargeConvolutionLayerDataset.h"
#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/datasets/TinyConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
#include "tests/validation/fixtures/WinogradConvolutionLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
const RelativeTolerance<float> rel_tolerance_f32(0.01f); /**< Relative tolerance for FP32 types */
const AbsoluteTolerance<float> abs_tolerance_f32(0.002f); /**< Absolute tolerance for FP32 types */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
const AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */

/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    DataType::F16,
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
    DataType::F32,
    DataType::QASYMM8,
});
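/** Activation functions used by the floating-point convolution tests */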
const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f)
});
} // namespace

TEST_SUITE(NEON)

TEST_SUITE(ConvolutionLayer)
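// Checks that the NEConvolutionLayer::get_convolution_method() heuristic selects the expected
// backend (Winograd or GEMM) for representative input/weight shapes, strides and fast-math flags.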
DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
                       framework::dataset::make("InputInfo", { TensorInfo(TensorShape(18U, 18U, 32U), 1, DataType::F32, 0),
                                                               TensorInfo(TensorShape(23U, 27U, 32U, 4U), 1, DataType::F32, 0),
                                                               TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0),
                                                               TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0)
                                                             }),
                       framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32, 0),
                                                                 TensorInfo(TensorShape(5U, 5U, 32U, 21U), 1, DataType::F32, 0),
                                                                 TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
                                                                 TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0)
                                                               })),
                       framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 16U, 21U), 1, DataType::F32, 0),
                                                                TensorInfo(TensorShape(19U, 23U, 21U, 4U), 1, DataType::F32, 0),
                                                                TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
                                                                TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0)
                                                              })),
                       framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
                                                              PadStrideInfo(1, 1, 0, 0),
                                                              PadStrideInfo(2, 1, 0, 0),
                                                              PadStrideInfo(3, 2, 1, 0)
                                                            })),
                       framework::dataset::make("FastMath", { true,
                                                              true,
                                                              false,
                                                              false
                                                            })),
                       framework::dataset::make("Expected", { ConvolutionMethod::WINOGRAD, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM })),
               input_info, weights_info, output_info, conv_info, fast_math, expected)
{
    ConvolutionMethod is_valid = NEConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
                                                                            &weights_info.clone()->set_is_resizable(true),
                                                                            &output_info.clone()->set_is_resizable(true), conv_info, WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), fast_math);
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
TEST_SUITE_END()

TEST_SUITE(WinogradLayer)
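// The Winograd tests run through the fast-math validation fixture; the NoBias variant
// exercises the same cases with the bias input disabled.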
template <typename T>
using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T>;

template <typename T>
using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, false>;

TEST_SUITE(FP32)

FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
                                                                          datasets::SmallWinogradConvolutionLayer5x5Dataset()),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunSmallNoBias, NEWinogradConvolutionLayerNoBiasFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
                                                                          datasets::SmallWinogradConvolutionLayer5x5Dataset()),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}

TEST_SUITE_END()
TEST_SUITE_END()

TEST_SUITE(GEMMConvolutionLayer)

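// Configuration-only check: creates the tensors, configures NEGEMMConvolutionLayer and verifies
// the valid regions and that the input/weights quantization info is preserved by configure().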
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
                                                                           CNNDataTypes),
                                                                   framework::dataset::make("ActivationInfo",
{ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    // Determine the fixed point position (only relevant for fixed point data types)
    int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;

    // Quantized asymmetric convolutions accumulate into an S32 bias
    auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

    // Create tensors
    Tensor src     = create_tensor<Tensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
    Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
    Tensor bias    = create_tensor<Tensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
    Tensor dst     = create_tensor<Tensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    const QuantizationInfo src_quantization_info     = src.info()->quantization_info();
    const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();

    // Create and configure function
    NEGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate QuantizationInfo
    ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);

    // Validate padding
    // TODO(COMPMID-415) Need to validate padding?
}

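// Note: this fixture instantiates NEConvolutionLayer (the public entry point), so the float tests
// below go through the convolution-method dispatch rather than NEGEMMConvolutionLayer directly.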
template <typename T>
using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T>;

TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F16)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F16)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}
TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                       ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
}
TEST_SUITE_END()
TEST_SUITE_END()

template <typename T>
using NEGEMMConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;

template <typename T>
using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;

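/** Activation functions used by the quantized convolution tests */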
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::QASYMM8)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
                       QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                       framework::dataset::make("ReshapeWeights", { true })),
                       framework::dataset::make("DataType", DataType::QASYMM8)),
                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
                       QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END()
TEST_SUITE_END()

TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
} // namespace test
} // namespace arm_compute