/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/NEON/Helper.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/GEMMLowpFusedOffsetOutputDataset.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/GEMMLowpAssemblyFixture.h"
#include "tests/validation/fixtures/GEMMLowpFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
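// Note: in this test framework, framework::dataset::make("Name", start, end) builds a
// dataset of consecutive values in the given range, and operator* on datasets forms
// their cartesian product, so data_matrix_multiply below expands to one test case per
// (M, N, K) combination.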
const auto data_matrix_multiply = framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K", 16);
} // namespace

TEST_SUITE(NEON)
TEST_SUITE(ASSEMBLY_MATRIX_MULTIPLY)
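// These tests drive the raw assembly GEMM path: NEGEMMLowpAssemblyMatrixMultiplyCore
// multiplies plain s8/u8 matrices into an S32 result, with no quantization offsets
// or output stage involved.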

using NEGEMMAssemblyFixture_S8 = GEMMLowpAssemblyFixture<Tensor, Accessor, NEGEMMLowpAssemblyMatrixMultiplyCore, int8_t>;
using NEGEMMAssemblyFixture_U8 = GEMMLowpAssemblyFixture<Tensor, Accessor, NEGEMMLowpAssemblyMatrixMultiplyCore, uint8_t>;

TEST_SUITE(S8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMAssemblyFixture_S8, framework::DatasetMode::PRECOMMIT, data_matrix_multiply)
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // S8

TEST_SUITE(U8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMAssemblyFixture_U8, framework::DatasetMode::PRECOMMIT, data_matrix_multiply)
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // U8
TEST_SUITE_END() // ASSEMBLY_MATRIX_MULTIPLY

TEST_SUITE(GEMMLowp)
TEST_SUITE(MatrixMultiplyCore)
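// NEGEMMLowpMatrixMultiplyCore multiplies two QASYMM8 matrices into an S32 output.
// With non-zero quantization offsets the offset contributions are folded into the
// result, so the output is, in effect, sum_k (a[i,k] - a_offset) * (b[k,j] - b_offset).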
using NEGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::concat(datasets::SmallGEMMLowpDataset(), datasets::LargeGEMMLowpDataset()),
               shape_a, shape_b, shape_c, a_offset, b_offset)
{
    // Create tensors
    Tensor a = create_tensor<Tensor>(shape_a, DataType::QASYMM8);
    Tensor b = create_tensor<Tensor>(shape_b, DataType::QASYMM8);
    Tensor c = create_tensor<Tensor>(shape_c, DataType::S32);

    a.info()->set_quantization_info(QuantizationInfo(1.0f / 255, a_offset));
    b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));

    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    NEGEMMLowpMatrixMultiplyCore gemmlowp_mm;
    gemmlowp_mm.configure(&a, &b, nullptr, &c);
}

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Input not a multiple of 4
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32),                                    // Mismatching data type
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)), // Invalid dimensions
                                             TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8, QuantizationInfo(1.f/255, 10)),
                                           }),
    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                            TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                            TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                            TensorInfo(TensorShape(33U, 21U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                            TensorInfo(TensorShape(64U, 16U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 10)),
                                          })),
    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
                                            TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
                                            TensorInfo(TensorShape(33U, 13U), 1, DataType::S32),
                                            TensorInfo(TensorShape(8U, 11U), 1, DataType::S32),
                                            TensorInfo(TensorShape(64U, 32U), 1, DataType::S32),
                                          })),
    framework::dataset::make("Expected", { false, false, false, false, true })),
    a_info, b_info, output_info, expected)
{
    // Validate with non-resizable tensor infos
    Status status = NEGEMMLowpMatrixMultiplyCore::validate(&a_info.clone()->set_is_resizable(false),
                                                           &b_info.clone()->set_is_resizable(false),
                                                           nullptr,
                                                           &output_info.clone()->set_is_resizable(false));
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpDataset())
{
    // Validate output
    validate(Accessor(_target), _reference);
}

using NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture = GEMMLowpMatrixMultiplyCoreFusedOffsetOutputValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
TEST_SUITE(FusedOffsetOutput)
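// These cases exercise the path where the offset contribution and the requantization
// output stage are fused into the GEMM itself (configured through GEMMInfo), so the
// function produces QASYMM8 directly instead of a separate S32 -> QASYMM8 pass.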
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::ALL, combine(datasets::SmallGEMMLowpFusedOffsetOutputUint8Dataset(),
                       framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpMatrixMultiplyCoreFusedOffsetOutputFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeGEMMLowpFusedOffsetOutputUint8Dataset(),
                       framework::dataset::make("DataType", { DataType::QASYMM8 })))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // FusedOffsetOutput
TEST_SUITE_END() // MatrixMultiplyCore

TEST_SUITE(OutputStage)

TEST_SUITE(QuantizeDownInt32ToUint8Scale)

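// Per the function's documentation, this output stage computes, per element (bias only
// when one is provided):
//   out = saturate<uint8>(((input + bias + result_offset) * result_mult_int) >> result_shift)
// optionally clamped to [min, max]; min == max == 0 disables the bounded ReLU.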
const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2, 3)
                                                      * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_uint8_scale_relu_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2)
                                                           * framework::dataset::make("result_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true });

using NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture = GEMMLowpQuantizeDownInt32ToUint8ScaleValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8Scale>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                           }),
    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(20U), 1, DataType::S32),
                                          })),
    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
                                            TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
                                            TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                          })),
    framework::dataset::make("Min",{ 0,
                                     8,
                                     13,
                                   })),
    framework::dataset::make("Max",{ 205,
                                     300,
                                     180,
                                   })),
    framework::dataset::make("Expected", { true, false, false })),
    a_info, b_info, output_info, min, max, expected)
{
    // Validate with non-resizable tensor infos
    Status status = NEGEMMLowpQuantizeDownInt32ToUint8Scale::validate(&a_info.clone()->set_is_resizable(false),
                                                                      &b_info.clone()->set_is_resizable(false),
                                                                      &output_info.clone()->set_is_resizable(false),
                                                                      min,
                                                                      max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases),
               shape, result_offset, result_mult_int, result_shift, min, max, add_bias)
{
    TensorShape shape_bias(shape[0]);

    // Create tensors
    Tensor in   = create_tensor<Tensor>(shape, DataType::S32);
    Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
    Tensor out  = create_tensor<Tensor>(shape, DataType::QASYMM8);

    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    NEGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;
    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_offset, result_mult_int, result_shift, min, max);

    // Validate valid region of input and output
    const ValidRegion valid_region = shape_to_valid_region(shape);
    validate(in.info()->valid_region(), valid_region);
    validate(out.info()->valid_region(), valid_region);

    // Validate valid region of bias
    if(add_bias)
    {
        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
        validate(bias.info()->valid_region(), valid_region_bias);
    }

    // Validate padding
    const PaddingSize padding(0);
    validate(in.info()->padding(), padding);
    validate(out.info()->padding(), padding);

    if(add_bias)
    {
        validate(bias.info()->padding(), padding);
    }
}

FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), quantize_down_int32_to_uint8_scale_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // BoundedReLu

TEST_SUITE_END() // QuantizeDownInt32ToUint8Scale

TEST_SUITE(QuantizeDownInt32ToUint8ScaleByFixedPoint)

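// The fixed-point variant follows the gemmlowp-style requantization pipeline; per the
// function's documentation, roughly:
//   tmp = saturating_rounding_doubling_high_mul(input + bias, result_fixedpoint_multiplier)
//   out = saturate<uint8>(rounding_divide_by_pow2(tmp, result_shift) + result_offset_after_shift)
// optionally clamped to [min, max] (min == max == 0 disables the bounded ReLU).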
const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                    * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                         * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0, 2) * framework::dataset::make("max", 171, 174) * framework::dataset::make("addBias", { false, true });

using NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                           }),
    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(20U), 1, DataType::S32),
                                          })),
    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
                                            TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8),
                                            TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                          })),
    framework::dataset::make("Min",{ 0,
                                     8,
                                     13,
                                   })),
    framework::dataset::make("Max",{ 205,
                                     300,
                                     180,
                                   })),
    framework::dataset::make("Expected", { true, false, false })),
    a_info, b_info, output_info, min, max, expected)
{
    // Validate with non-resizable tensor infos
    Status status = NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
                                                                                  &b_info.clone()->set_is_resizable(false),
                                                                                  &output_info.clone()->set_is_resizable(false),
                                                                                  min,
                                                                                  max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_uint8_scale_by_fixedpoint_cases),
               shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
{
    TensorShape shape_bias(shape[0]);

    // Create tensors
    Tensor in   = create_tensor<Tensor>(shape, DataType::S32);
    Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
    Tensor out  = create_tensor<Tensor>(shape, DataType::QASYMM8);

    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint output_stage;
    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

    // Validate valid region of input and output
    const ValidRegion valid_region = shape_to_valid_region(shape);
    validate(in.info()->valid_region(), valid_region);
    validate(out.info()->valid_region(), valid_region);

    // Validate valid region of bias
    if(add_bias)
    {
        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
        validate(bias.info()->valid_region(), valid_region_bias);
    }

    // Validate padding
    const PaddingSize padding(0);
    validate(in.info()->padding(), padding);
    validate(out.info()->padding(), padding);

    if(add_bias)
    {
        validate(bias.info()->padding(), padding);
    }
}

FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(),
                       quantize_down_int32_to_uint8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // BoundedReLu

TEST_SUITE_END() // QuantizeDownInt32ToUint8ScaleByFixedPoint

TEST_SUITE(QuantizeDownInt32ToInt8ScaleByFixedPoint)

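// Same fixed-point pipeline as the uint8 variant above, but producing QASYMM8_SIGNED
// output: results saturate to the int8 range [-128, 127], so the min/max bounds must
// also lie within that range.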
const auto quantize_down_int32_to_int8_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                   * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                        * framework::dataset::make("result_offset_after_shift", 2, 3) * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });

using NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::F32), // Invalid input data type
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32),
                                           }),
    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(20U), 1, DataType::S32),
                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
                                          })),
    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
                                            TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
                                            TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                            TensorInfo(TensorShape(21U, 13U), 1, DataType::QASYMM8_SIGNED),
                                          })),
    framework::dataset::make("Min",{ -110,
                                     -130,
                                     -113,
                                     -113,
                                   })),
    framework::dataset::make("Max",{ 87,
                                     140,
                                     97,
                                     97,
                                   })),
    framework::dataset::make("Expected", { false, false, false, true })),
    a_info, b_info, output_info, min, max, expected)
{
    // Validate with non-resizable tensor infos
    Status status = NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
                                                                                 &b_info.clone()->set_is_resizable(false),
                                                                                 &output_info.clone()->set_is_resizable(false),
                                                                                 min,
                                                                                 max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int8_scale_by_fixedpoint_cases),
               shape, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, add_bias)
{
    TensorShape shape_bias(shape[0]);

    // Create tensors
    Tensor in   = create_tensor<Tensor>(shape, DataType::S32);
    Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
    Tensor out  = create_tensor<Tensor>(shape, DataType::QASYMM8_SIGNED);

    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint output_stage;
    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

    // Validate valid region of input and output
    const ValidRegion valid_region = shape_to_valid_region(shape);
    validate(in.info()->valid_region(), valid_region);
    validate(out.info()->valid_region(), valid_region);

    // Validate valid region of bias
    if(add_bias)
    {
        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
        validate(bias.info()->valid_region(), valid_region_bias);
    }

    // Validate padding
    const PaddingSize padding(0);
    validate(in.info()->padding(), padding);
    validate(out.info()->padding(), padding);

    if(add_bias)
    {
        validate(bias.info()->padding(), padding);
    }
}

FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int8_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}

TEST_SUITE(BoundedReLu)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int8_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToInt8ScaleByFixedPoint

TEST_SUITE(QuantizeDownInt32ToInt16ScaleByFixedPoint)

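// QSYMM16 is a symmetric quantized format, so this variant takes no result offset.
// A negative result_shift (the "MultGreater1" cases below) acts as a left shift,
// i.e. an effective fixed-point multiplier greater than 1.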
const auto quantize_down_int32_to_int16_scale_by_fixedpoint_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                    * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", 1, 2)
                                                                         * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases = framework::dataset::make("result_fixedpoint_multiplier", 1073741823, 1073741825) * framework::dataset::make("result_shift", -3, -2)
                                                                               * framework::dataset::make("min", 0) * framework::dataset::make("max", 0) * framework::dataset::make("addBias", { false, true });

const auto quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases = framework::dataset::make("result_fixedpoint_multiplier", 254601600, 254601602) * framework::dataset::make("result_shift", -3, -1)
                                                                                    * framework::dataset::make("min", -2, 0) * framework::dataset::make("max", 1, 3) * framework::dataset::make("addBias", { false, true });

using NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture =
    GEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointValidationFixture<Tensor, Accessor, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint>;

// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Input not a multiple of 16
                                             TensorInfo(TensorShape(21U, 13U), 1, DataType::S32), // Invalid min and max
                                             TensorInfo(TensorShape(20U, 13U), 1, DataType::S32), // Wrong output data type
                                           }),
    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(21U), 1, DataType::S32),
                                            TensorInfo(TensorShape(20U), 1, DataType::S32),
                                          })),
    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
                                            TensorInfo(TensorShape(21U, 13U), 1, DataType::QSYMM16),
                                            TensorInfo(TensorShape(20U, 13U), 1, DataType::S32),
                                          })),
    framework::dataset::make("Min",{ -205,
                                     -60000,
                                     -180,
                                   })),
    framework::dataset::make("Max",{ 205,
                                     60000,
                                     180,
                                   })),
    framework::dataset::make("Expected", { true, false, false })),
    a_info, b_info, output_info, min, max, expected)
{
    // Validate with non-resizable tensor infos
    Status status = NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(&a_info.clone()->set_is_resizable(false),
                                                                                  &b_info.clone()->set_is_resizable(false),
                                                                                  &output_info.clone()->set_is_resizable(false),
                                                                                  min,
                                                                                  max);
    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), quantize_down_int32_to_int16_scale_by_fixedpoint_cases),
               shape, result_fixedpoint_multiplier, result_shift, min, max, add_bias)
{
    TensorShape shape_bias(shape[0]);

    // Create tensors
    Tensor in   = create_tensor<Tensor>(shape, DataType::S32);
    Tensor bias = create_tensor<Tensor>(shape_bias, DataType::S32);
    Tensor out  = create_tensor<Tensor>(shape, DataType::QSYMM16);

    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);

    // Create and configure function
    NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint output_stage;
    output_stage.configure(&in, add_bias ? &bias : nullptr, &out, result_fixedpoint_multiplier, result_shift, min, max);

    // Validate valid region of input and output
    const ValidRegion valid_region = shape_to_valid_region(shape);
    validate(in.info()->valid_region(), valid_region);
    validate(out.info()->valid_region(), valid_region);

    // Validate valid region of bias
    if(add_bias)
    {
        const ValidRegion valid_region_bias = shape_to_valid_region(shape_bias);
        validate(bias.info()->valid_region(), valid_region_bias);
    }

    // Validate padding
    const PaddingSize padding(0);
    validate(in.info()->padding(), padding);
    validate(out.info()->padding(), padding);

    if(add_bias)
    {
        validate(bias.info()->padding(), padding);
    }
}

TEST_SUITE(NoRelu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // NoRelu
TEST_SUITE(BoundedReLu)
TEST_SUITE(MultSmallerEq1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultSmallerEq1
TEST_SUITE(MultGreater1)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointFixture, framework::DatasetMode::ALL, combine(datasets::SmallShapes(),
                       quantize_down_int32_to_int16_scale_by_fixedpoint_multgreat1_relu_cases))
{
    // Validate output
    validate(Accessor(_target), _reference);
}
TEST_SUITE_END() // MultGreater1
TEST_SUITE_END() // BoundedReLu
TEST_SUITE_END() // QuantizeDownInt32ToInt16ScaleByFixedPoint
TEST_SUITE_END() // OutputStage
TEST_SUITE_END() // GEMMLowp
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute