/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/LargeConvolutionLayerDataset.h"
#include "tests/datasets/SmallConvolutionLayerDataset.h"
#include "tests/datasets/TinyConvolutionLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ConvolutionLayerFixture.h"
#include "tests/validation/fixtures/WinogradConvolutionLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
const RelativeTolerance<float> rel_tolerance_f32(0.01f); /**< Relative tolerance for FP32 types */
const AbsoluteTolerance<float> abs_tolerance_f32(0.002f); /**< Absolute tolerance for FP32 types */
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
const AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */

/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    DataType::F16,
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
    DataType::F32,
    DataType::QASYMM8,
});
const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f)
});
} // namespace

TEST_SUITE(NEON)

TEST_SUITE(ConvolutionLayer)
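// Checks that NEConvolutionLayer::get_convolution_method() picks the expected backend
// (Winograd vs. GEMM) for each combination of tensor info, convolution info and fast-math hint.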
DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(18U, 18U, 32U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(23U, 27U, 32U, 4U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32)
                                                     }),
               framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(5U, 5U, 32U, 21U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                                                         TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16)
                                                       })),
               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 16U, 21U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(19U, 23U, 21U, 4U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32)
                                                      })),
               framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(1, 1, 0, 0),
                                                      PadStrideInfo(2, 1, 0, 0),
                                                      PadStrideInfo(3, 2, 1, 0)
                                                    })),
               framework::dataset::make("FastMath", { true,
                                                      true,
                                                      false,
                                                      false
                                                    })),
               framework::dataset::make("Expected", { ConvolutionMethod::WINOGRAD, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM })),
               input_info, weights_info, output_info, conv_info, fast_math, expected)
{
    ConvolutionMethod is_valid = NEConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
                                                                            &weights_info.clone()->set_is_resizable(true),
                                                                            &output_info.clone()->set_is_resizable(true), conv_info, WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), fast_math);
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
TEST_SUITE_END()

TEST_SUITE(WinogradLayer)
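// Fast-math Winograd validation fixtures: the first runs with a bias tensor, the second without.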
template <typename T>
using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T>;

template <typename T>
using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, false>;

TEST_SUITE(FP32)

FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
                                                                          datasets::SmallWinogradConvolutionLayer5x5Dataset()),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunSmallNoBias, NEWinogradConvolutionLayerNoBiasFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(framework::dataset::concat(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
                                                                          datasets::SmallWinogradConvolutionLayer5x5Dataset()),
                                               framework::dataset::make("DataType", { DataType::F32 })),
                                       ActivationFunctionsDataset),
                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
    // Validate output
    validate(Accessor(_target), _reference, abs_tolerance_f32);
}

TEST_SUITE_END()
TEST_SUITE_END()

TEST_SUITE(GEMMConvolutionLayer)

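// Configuration-only test: creates tensors for every shape in the small and large datasets,
// checks they are resizable, configures a NEGEMMConvolutionLayer, then validates the valid
// regions and verifies that configure() leaves the source/weights quantization info untouched.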
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallConvolutionLayerDataset(), datasets::LargeConvolutionLayerDataset()),
                                                                           CNNDataTypes),
                                                                   framework::dataset::make("ActivationInfo",
                                                                   { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })),
               input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, act_info)
{
    auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;

    // Create tensors
    Tensor src     = create_tensor<Tensor>(input_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    Tensor weights = create_tensor<Tensor>(weights_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    Tensor bias    = create_tensor<Tensor>(bias_shape, bias_data_type, 1, QuantizationInfo(2.f / 255.f, 127));
    Tensor dst     = create_tensor<Tensor>(output_shape, data_type, 1, QuantizationInfo(2.f / 255.f, 127));

    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

    const QuantizationInfo src_quantization_info     = src.info()->quantization_info();
    const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();

    // Create and configure function
    NEGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, info, WeightsInfo(), dilation, act_info);

    // Validate valid region
    const ValidRegion src_valid_region     = shape_to_valid_region(input_shape);
    const ValidRegion weights_valid_region = shape_to_valid_region(weights_shape);
    const ValidRegion bias_valid_region    = shape_to_valid_region(bias_shape);
    const ValidRegion dst_valid_region     = shape_to_valid_region(output_shape);

    validate(src.info()->valid_region(), src_valid_region);
    validate(weights.info()->valid_region(), weights_valid_region);
    validate(bias.info()->valid_region(), bias_valid_region);
    validate(dst.info()->valid_region(), dst_valid_region);

    // Validate QuantizationInfo
    ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);

    // Validate padding
    //TODO(COMPMID-415) Need to validate padding?
}
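
// For reference, a minimal usage sketch of NEGEMMConvolutionLayer outside the test framework
// (shapes are purely illustrative and error handling is omitted):
//
//     Tensor src, weights, bias, dst;
//     src.allocator()->init(TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32));
//     weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32));
//     bias.allocator()->init(TensorInfo(TensorShape(21U), 1, DataType::F32));
//     dst.allocator()->init(TensorInfo(TensorShape(21U, 25U, 21U), 1, DataType::F32));
//
//     NEGEMMConvolutionLayer conv;
//     conv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 0, 0));
//
//     src.allocator()->allocate();
//     weights.allocator()->allocate();
//     bias.allocator()->allocate();
//     dst.allocator()->allocate();
//
//     // ... fill src, weights and bias ...
//     conv.run();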

template <typename T>
using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;

TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F16)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}
TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

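// FP32 results are validated against the reference using both the relative and absolute
// tolerances defined at the top of this file.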
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                       framework::dataset::make("ReshapeWeights", { true })),
                                               framework::dataset::make("DataType", DataType::F32)),
                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                               ActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
}
TEST_SUITE_END()
TEST_SUITE_END()

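// Quantized (QASYMM8) runs: inputs and weights are uint8_t with the QuantizationInfo set in the
// dataset below, and a dedicated activation dataset (identity, RELU, LU_BOUNDED_RELU) is used.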
template <typename T>
using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEGEMMConvolutionLayer, T>;

const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
});
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
                               QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
                                                               framework::dataset::make("ReshapeWeights", { true })),
                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
                                               framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
                               QuantizedActivationFunctionsDataset))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END()
TEST_SUITE_END()

TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
} // namespace test
} // namespace arm_compute