/*
 * Copyright (c) 2017-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ACL_TESTS_VALIDATION_HELPERS_H
#define ACL_TESTS_VALIDATION_HELPERS_H

#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/function_info/ActivationLayerInfo.h"
#include "support/Half.h"
#include "tests/Globals.h"
#include "tests/SimpleTensor.h"

#include <cmath>
#include <cstdint>
#include <random>
#include <type_traits>
#include <utility>

namespace arm_compute
{
namespace test
{
namespace validation
{
template <typename T>
struct is_floating_point : public std::is_floating_point<T>
{
};

template <>
struct is_floating_point<half> : public std::true_type
{
};

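/* A minimal usage sketch (illustrative only, not part of this header): the trait mirrors
 * std::is_floating_point but additionally reports true for half, so reference code can
 * branch on floating-point types uniformly. The function name below is hypothetical.
 *
 *   template <typename T>
 *   typename std::enable_if<is_floating_point<T>::value, void>::type
 *   fill_with_float_noise(SimpleTensor<T> &tensor);
 */
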
/** Helper struct to store the hints for
 * - destination quantization info
 * - minimum bias value
 * - maximum bias value
 * in quantized test construction.
 */
struct QuantizationHint
{
    QuantizationInfo q_info;
    int32_t          bias_min;
    int32_t          bias_max;
};

/** Helper function to get the testing range for each activation layer.
 *
 * @param[in] activation Activation function to test.
 * @param[in] data_type  Data type.
 *
 * @return A pair containing the lower and upper testing bounds for the given function.
 */
template <typename T>
std::pair<T, T> get_activation_layer_test_bounds(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
{
    std::pair<T, T> bounds;

    switch(data_type)
    {
        case DataType::F16:
        {
            using namespace half_float::literal;

            switch(activation)
            {
                case ActivationLayerInfo::ActivationFunction::TANH:
                case ActivationLayerInfo::ActivationFunction::SQUARE:
                case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                    // Reduce the range as the exponent overflows for these functions
                    bounds = std::make_pair(-2._h, 2._h);
                    break;
                case ActivationLayerInfo::ActivationFunction::SQRT:
                    // Reduce the range as sqrt expects a non-negative input
                    bounds = std::make_pair(0._h, 128._h);
                    break;
                default:
                    bounds = std::make_pair(-255._h, 255._h);
                    break;
            }
            break;
        }
        case DataType::F32:
            switch(activation)
            {
                case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                    // Reduce the range as the exponent overflows for this function
                    bounds = std::make_pair(-40.f, 40.f);
                    break;
                case ActivationLayerInfo::ActivationFunction::SQRT:
                    // Reduce the range as sqrt expects a non-negative input
                    bounds = std::make_pair(0.f, 255.f);
                    break;
                default:
                    bounds = std::make_pair(-255.f, 255.f);
                    break;
            }
            break;
        default:
            ARM_COMPUTE_ERROR("Unsupported data type");
    }

    return bounds;
}

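/* Usage sketch (illustrative only): a validation fixture would typically feed these bounds
 * into a uniform distribution when filling the input of an activation test. The tensor name
 * `src` and the use of the global AssetsLibrary `library` are assumptions of this sketch.
 *
 *   SimpleTensor<float> src(TensorShape(27U, 13U), DataType::F32);
 *   const auto          bounds = get_activation_layer_test_bounds<float>(
 *       ActivationLayerInfo::ActivationFunction::SQRT, DataType::F32);
 *   std::uniform_real_distribution<float> distribution(bounds.first, bounds.second);
 *   library->fill(src, distribution, 0); // seed offset 0
 */
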
/** Convert an asymmetric quantized simple tensor into float using tensor quantization information.
 *
 * @param[in] src Quantized tensor.
 *
 * @return Float tensor.
 */
template <typename T>
SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<T> &src);

/** Convert a float simple tensor into an asymmetric quantized tensor using the specified quantization information.
 *
 * @param[in] src               Float tensor.
 * @param[in] quantization_info Quantization information.
 *
 * \relates arm_compute::test::SimpleTensor
 * @return Quantized tensor.
 */
template <typename T>
SimpleTensor<T> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info);

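/* Round-trip sketch (illustrative only): quantize a float reference tensor to QASYMM8 and
 * dequantize it again, e.g. to gauge the quantization error a validation tolerance has to
 * absorb. The shape and quantization parameters are arbitrary example values.
 *
 *   SimpleTensor<float>    src(TensorShape(16U, 16U), DataType::F32);
 *   const QuantizationInfo qinfo(0.5f, 10);
 *   SimpleTensor<uint8_t>  quantized   = convert_to_asymmetric<uint8_t>(src, qinfo);
 *   SimpleTensor<float>    dequantized = convert_from_asymmetric(quantized);
 */
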
/** Convert a symmetric quantized simple tensor into float using tensor quantization information.
 *
 * @param[in] src Quantized tensor.
 *
 * @return Float tensor.
 */
template <typename T>
SimpleTensor<float> convert_from_symmetric(const SimpleTensor<T> &src);

/** Convert a float simple tensor into a symmetric quantized tensor using the specified quantization information.
 *
 * @param[in] src               Float tensor.
 * @param[in] quantization_info Quantization information.
 *
 * \relates arm_compute::test::SimpleTensor
 * @return Quantized tensor.
 */
template <typename T>
SimpleTensor<T> convert_to_symmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info);

/** Matrix multiplication between two float simple tensors
 *
 * @param[in]  a   Input tensor A
 * @param[in]  b   Input tensor B
 * @param[out] out Output tensor
 */
template <typename T>
void matrix_multiply(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &out);

/** Transpose matrix
 *
 * @param[in]  in  Input tensor
 * @param[out] out Output tensor
 */
template <typename T>
void transpose_matrix(const SimpleTensor<T> &in, SimpleTensor<T> &out);

/** Get a 2D tile from a tensor
 *
 * @note In case of out-of-bound reads, the tile will be filled with zeros
 *
 * @param[in]  in    Input tensor
 * @param[out] tile  Tile
 * @param[in]  coord Coordinates
 */
template <typename T>
void get_tile(const SimpleTensor<T> &in, SimpleTensor<T> &tile, const Coordinates &coord);

/** Fill the input tensor with zeros in the area defined by anchor and shape
 *
 * @param[in,out] in     Input tensor to fill with zeros
 * @param[in]     anchor Starting point of the zeros area
 * @param[in]     shape  Shape of the zeros area
 */
template <typename T>
void zeros(SimpleTensor<T> &in, const Coordinates &anchor, const TensorShape &shape);

207/** Helper function to compute quantized min and max bounds
208 *
209 * @param[in] quant_info Quantization info to be used for conversion
210 * @param[in] min Floating point minimum value to be quantized
211 * @param[in] max Floating point maximum value to be quantized
212 */
213std::pair<int, int> get_quantized_bounds(const QuantizationInfo &quant_info, float min, float max);
Georgios Pinitasdbdea0d2019-10-16 19:21:40 +0100214
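/* Usage sketch (illustrative only): the returned bounds are typically used to build an
 * integer distribution that fills a QASYMM8 tensor with values covering a known float
 * range. The quantization parameters are arbitrary example values.
 *
 *   const QuantizationInfo qinfo(1.f / 255.f, 0);
 *   const auto             bounds = get_quantized_bounds(qinfo, -1.f, 1.f);
 *   std::uniform_int_distribution<> distribution(bounds.first, bounds.second);
 */
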
/** Helper function to compute asymmetric quantized signed min and max bounds
 *
 * @param[in] quant_info Quantization info to be used for conversion
 * @param[in] min        Floating point minimum value to be quantized
 * @param[in] max        Floating point maximum value to be quantized
 *
 * @return A pair containing the quantized values of (min, max)
 */
std::pair<int, int> get_quantized_qasymm8_signed_bounds(const QuantizationInfo &quant_info, float min, float max);

/** Helper function to compute symmetric quantized min and max bounds
 *
 * @param[in] quant_info Quantization info to be used for conversion
 * @param[in] min        Floating point minimum value to be quantized
 * @param[in] max        Floating point maximum value to be quantized
 * @param[in] channel_id (Optional) Channel id for per-channel quantization info
 *
 * @return A pair containing the quantized values of (min, max)
 */
std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id = 0);

/** Add random padding along the X axis (between 1 and 16 columns per side) to all the input tensors.
 *  This is used in our validation suite in order to simulate implicit padding added after configuring, but before allocating.
 *
 * @param[in] tensors        List of tensors to add padding to
 * @param[in] data_layout    (Optional) Data layout of the operator
 * @param[in] only_right_pad (Optional) Only add padding on the right side, e.g. when testing OpenCL image padding
 *
 * @note This function adds padding to the input tensors only if data_layout == DataLayout::NHWC
 */
void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout = DataLayout::NHWC, bool only_right_pad = false);

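/* Usage sketch (illustrative only): in a fixture this is called after configure() and before
 * the tensors are allocated, so the kernel under test must cope with non-zero padding. The
 * function object `conv` and the tensor names are hypothetical.
 *
 *   conv.configure(&src, &weights, &bias, &dst, conv_info);
 *   add_padding_x({ &src, &weights, &bias, &dst }, DataLayout::NHWC);
 *   src.allocator()->allocate();
 *   // ... allocate the remaining tensors and run
 */
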
/** For a matrix multiplication, given the Lhs/Rhs matrix quantization information and the matrix multiplication dimensions,
 *  calculate a suitable output quantization and a suggested bias range for obtaining non-saturated outputs with high probability.
 *
 * @param[in] lhs_q_info    Lhs matrix quantization info
 * @param[in] rhs_q_info    Rhs matrix quantization info
 * @param[in] m             Number of rows of the Lhs matrix
 * @param[in] n             Number of columns of the Rhs matrix
 * @param[in] k             Number of rows/columns of the Rhs/Lhs matrix
 * @param[in] data_type     Data type; only QASYMM8 and QASYMM8_SIGNED are supported
 * @param[in] bias_fraction The fraction of bias amplitude compared to integer accumulation. 0 if there is no bias.
 *
 * @return QuantizationHint object containing the suggested output quantization info and min/max bias range
 */
QuantizationHint suggest_matmul_dst_q_info_and_bias(const QuantizationInfo &lhs_q_info,
                                                    const QuantizationInfo &rhs_q_info, int32_t m, int32_t n, int32_t k, DataType data_type,
                                                    float bias_fraction);

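/* Usage sketch (illustrative only): a quantized MatMul test can derive its destination
 * quantization and a safe bias range from the operand quantization infos. The scales,
 * offsets and dimensions below are arbitrary example values.
 *
 *   const QuantizationInfo lhs_q_info(0.02f, -10);
 *   const QuantizationInfo rhs_q_info(0.5f, 20);
 *   const QuantizationHint hint = suggest_matmul_dst_q_info_and_bias(
 *       lhs_q_info, rhs_q_info, 16, 32, 64, DataType::QASYMM8_SIGNED, 0.5f);
 *   std::uniform_int_distribution<int32_t> bias_distribution(hint.bias_min, hint.bias_max);
 */
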
/** For a multiply-accumulate (mac), given the Lhs/Rhs vector quantization information and the dot product dimensions,
 *  calculate a suitable output quantization and a suggested bias range for obtaining non-saturated outputs with high probability.
 *
 * @param[in] lhs_q_info    Lhs vector quantization info
 * @param[in] rhs_q_info    Rhs vector quantization info
 * @param[in] k             Number of accumulations taking place in the sum, i.e. c = sum_k(a_k * b_k)
 * @param[in] data_type     Data type; only QASYMM8 and QASYMM8_SIGNED are supported
 * @param[in] bias_fraction The fraction of bias amplitude compared to integer accumulation.
 *
 * @return QuantizationHint object containing the suggested output quantization info and min/max bias range
 */
QuantizationHint suggest_mac_dst_q_info_and_bias(const QuantizationInfo &lhs_q_info,
                                                 const QuantizationInfo &rhs_q_info, int32_t k, DataType data_type, float bias_fraction);
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif // ACL_TESTS_VALIDATION_HELPERS_H