/*
 * Copyright (c) 2017-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
Gunes Bayir9d0c4de2023-04-13 18:22:58 +010024#ifndef ACL_TESTS_VALIDATION_HELPERS
25#define ACL_TESTS_VALIDATION_HELPERS
Anthony Barbier6ff3b192017-09-04 18:44:23 +010026
Georgios Pinitas7b7858d2017-06-21 16:44:24 +010027#include "arm_compute/core/Types.h"
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010028#include "arm_compute/core/Utils.h"
Moritz Pflanzer6c6597c2017-09-24 12:09:41 +010029#include "support/Half.h"
John Richardson6f4d49f2017-09-07 11:21:10 +010030#include "tests/Globals.h"
Moritz Pflanzer6c6597c2017-09-24 12:09:41 +010031#include "tests/SimpleTensor.h"
Georgios Pinitas7b7858d2017-06-21 16:44:24 +010032
Michalis Spyroua3c9a3b2020-12-08 21:02:16 +000033#include <math.h>
Georgios Pinitas7b7858d2017-06-21 16:44:24 +010034#include <random>
Anthony Barbier6ff3b192017-09-04 18:44:23 +010035#include <type_traits>
36#include <utility>
37
38namespace arm_compute
39{
40namespace test
41{
42namespace validation
43{
/** Type trait that extends std::is_floating_point to the half-precision type.
 *
 * The primary template simply forwards to std::is_floating_point; the
 * specialization below additionally reports true for the library's fp16
 * type (half), which the standard trait does not recognise.
 */
template <typename T>
struct is_floating_point : public std::is_floating_point<T>
{
};

/** Specialization: treat the half-precision type as floating point. */
template <>
struct is_floating_point<half> : public std::true_type
{
};
Pablo Tello8fda1cb2017-07-05 15:20:38 +010053
Anthony Barbier6ff3b192017-09-04 18:44:23 +010054/** Helper function to get the testing range for each activation layer.
55 *
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +010056 * @param[in] activation Activation function to test.
57 * @param[in] data_type Data type.
Anthony Barbier6ff3b192017-09-04 18:44:23 +010058 *
59 * @return A pair containing the lower upper testing bounds for a given function.
60 */
61template <typename T>
Vidhya Sudhan Loganathan014333d2018-07-02 09:13:49 +010062std::pair<T, T> get_activation_layer_test_bounds(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
Anthony Barbier6ff3b192017-09-04 18:44:23 +010063{
Anthony Barbier6ff3b192017-09-04 18:44:23 +010064 std::pair<T, T> bounds;
65
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010066 switch(data_type)
Anthony Barbier6ff3b192017-09-04 18:44:23 +010067 {
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010068 case DataType::F16:
Isabella Gottardi3b77e9d2017-06-22 11:05:41 +010069 {
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010070 using namespace half_float::literal;
Isabella Gottardi3b77e9d2017-06-22 11:05:41 +010071
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010072 switch(activation)
Isabella Gottardi3b77e9d2017-06-22 11:05:41 +010073 {
Georgios Pinitas3463a8b2018-08-23 13:11:53 +010074 case ActivationLayerInfo::ActivationFunction::TANH:
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010075 case ActivationLayerInfo::ActivationFunction::SQUARE:
76 case ActivationLayerInfo::ActivationFunction::LOGISTIC:
77 case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
78 // Reduce range as exponent overflows
Georgios Pinitas3463a8b2018-08-23 13:11:53 +010079 bounds = std::make_pair(-2._h, 2._h);
Isabella Gottardi3b77e9d2017-06-22 11:05:41 +010080 break;
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010081 case ActivationLayerInfo::ActivationFunction::SQRT:
82 // Reduce range as sqrt should take a non-negative number
Georgios Pinitas3463a8b2018-08-23 13:11:53 +010083 bounds = std::make_pair(0._h, 128._h);
Isabella Gottardi3b77e9d2017-06-22 11:05:41 +010084 break;
85 default:
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010086 bounds = std::make_pair(-255._h, 255._h);
87 break;
Isabella Gottardi3b77e9d2017-06-22 11:05:41 +010088 }
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010089 break;
Isabella Gottardi3b77e9d2017-06-22 11:05:41 +010090 }
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010091 case DataType::F32:
92 switch(activation)
93 {
Gunes Bayir01934e92022-11-02 11:50:37 +000094 case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
95 // Reduce range as exponent overflows
96 bounds = std::make_pair(-40.f, 40.f);
97 break;
Moritz Pflanzera09de0c2017-09-01 20:41:12 +010098 case ActivationLayerInfo::ActivationFunction::SQRT:
99 // Reduce range as sqrt should take a non-negative number
100 bounds = std::make_pair(0.f, 255.f);
101 break;
102 default:
103 bounds = std::make_pair(-255.f, 255.f);
104 break;
105 }
106 break;
Moritz Pflanzera09de0c2017-09-01 20:41:12 +0100107 default:
108 ARM_COMPUTE_ERROR("Unsupported data type");
Isabella Gottardi3b77e9d2017-06-22 11:05:41 +0100109 }
110
Moritz Pflanzera09de0c2017-09-01 20:41:12 +0100111 return bounds;
Isabella Gottardi3b77e9d2017-06-22 11:05:41 +0100112}
113
/** Calculate output tensor shape given a vector of input tensors to concatenate across depth.
 *
 * @param[in] input_shapes Shapes of the tensors to concatenate across depth.
 *
 * @return The shape of the output concatenated tensor.
 */
TensorShape calculate_depth_concatenate_shape(const std::vector<TensorShape> &input_shapes);

/** Calculate output tensor shape for the concatenate operation along a given axis
 *
 * @param[in] input_shapes Shapes of the tensors to concatenate.
 * @param[in] axis         Axis to use for the concatenate operation.
 *
 * @return The shape of the output concatenated tensor.
 */
TensorShape calculate_concatenate_shape(const std::vector<TensorShape> &input_shapes, size_t axis);
Michalis Spyrou55b3d122018-05-09 09:59:23 +0100130
/** Convert an asymmetric quantized simple tensor into float using tensor quantization information.
 *
 * @param[in] src Quantized tensor.
 *
 * @return Float tensor.
 */
template <typename T>
SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<T> &src);

/** Convert a float simple tensor into asymmetric quantized using the specified quantization information.
 *
 * @param[in] src               Float tensor.
 * @param[in] quantization_info Quantization information.
 *
 * @return Quantized tensor.
 */
template <typename T>
SimpleTensor<T> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info);

/** Convert a symmetric quantized simple tensor into float using tensor quantization information.
 *
 * @param[in] src Quantized tensor.
 *
 * @return Float tensor.
 */
template <typename T>
SimpleTensor<float> convert_from_symmetric(const SimpleTensor<T> &src);

/** Convert a float simple tensor into symmetric quantized using the specified quantization information.
 *
 * @param[in] src               Float tensor.
 * @param[in] quantization_info Quantization information.
 *
 * @return Quantized tensor.
 */
template <typename T>
SimpleTensor<T> convert_to_symmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info);
168
/** Matrix multiply between 2 float simple tensors.
 *
 * @param[in]  a   Input tensor A.
 * @param[in]  b   Input tensor B.
 * @param[out] out Output tensor holding the result of A * B.
 *
 */
template <typename T>
void matrix_multiply(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &out);

/** Transpose matrix.
 *
 * @param[in]  in  Input tensor.
 * @param[out] out Output tensor holding the transposed input.
 *
 */
template <typename T>
void transpose_matrix(const SimpleTensor<T> &in, SimpleTensor<T> &out);

/** Get a 2D tile from a tensor.
 *
 * @note In case of out-of-bound reads, the tile will be filled with zeros.
 *
 * @param[in]  in    Input tensor.
 * @param[out] tile  Tile to fill; its shape determines how much data is read.
 * @param[in]  coord Coordinates in @p in at which the tile starts.
 */
template <typename T>
void get_tile(const SimpleTensor<T> &in, SimpleTensor<T> &tile, const Coordinates &coord);
Gian Marco Iodicef1c2bf02018-06-13 14:05:54 +0100198
/** Fill with zeros the input tensor in the area defined by anchor and shape.
 *
 * @param[in,out] in     Input tensor to fill with zeros.
 * @param[in]     anchor Starting point of the zeros area.
 * @param[in]     shape  Extent of the zeros area.
 */
template <typename T>
void zeros(SimpleTensor<T> &in, const Coordinates &anchor, const TensorShape &shape);
Michele Di Giorgioed5a4922018-09-13 16:22:01 +0100207
/** Helper function to compute quantized min and max bounds.
 *
 * @param[in] quant_info Quantization info to be used for conversion.
 * @param[in] min        Floating point minimum value to be quantized.
 * @param[in] max        Floating point maximum value to be quantized.
 *
 * @return A pair holding the quantized (min, max) bounds.
 */
std::pair<int, int> get_quantized_bounds(const QuantizationInfo &quant_info, float min, float max);

/** Helper function to compute asymmetric quantized signed min and max bounds.
 *
 * @param[in] quant_info Quantization info to be used for conversion.
 * @param[in] min        Floating point minimum value to be quantized.
 * @param[in] max        Floating point maximum value to be quantized.
 *
 * @return A pair holding the quantized (min, max) bounds.
 */
std::pair<int, int> get_quantized_qasymm8_signed_bounds(const QuantizationInfo &quant_info, float min, float max);

/** Helper function to compute symmetric quantized min and max bounds.
 *
 * @param[in] quant_info Quantization info to be used for conversion.
 * @param[in] min        Floating point minimum value to be quantized.
 * @param[in] max        Floating point maximum value to be quantized.
 * @param[in] channel_id (Optional) Channel id for per channel quantization info.
 *
 * @return A pair holding the quantized (min, max) bounds.
 */
std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id = 0);
Giorgio Arena63825e82021-03-25 14:54:50 +0000232
/** Add random padding along the X axis (between 1 and 16 columns per side) to all the input tensors.
 * This is used in our validation suite in order to simulate implicit padding addition after configuring, but before allocating.
 *
 * @param[in] tensors        List of tensors to add padding to.
 * @param[in] data_layout    (Optional) Data layout of the operator.
 * @param[in] only_right_pad (Optional) Only right padding testing, in case of cl image padding.
 *
 * @note This function adds padding to the input tensors only if data_layout == DataLayout::NHWC.
 */
void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout = DataLayout::NHWC, bool only_right_pad = false);

/** Add random padding along the Y axis (between 1 and 4 rows per side) to all the input tensors.
 * This is used in our validation suite in order to simulate implicit padding addition after configuring, but before allocating.
 *
 * @param[in] tensors     List of tensors to add padding to.
 * @param[in] data_layout (Optional) Data layout of the operator.
 *
 * @note This function adds padding to the input tensors only if data_layout == DataLayout::NHWC.
 */
void add_padding_y(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout = DataLayout::NHWC);
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100253
/** For MatMulLowp, given the Lhs/Rhs matrix quantization informations and the matrix multiplication dimensions,
 * calculate a suitable output quantization for obtaining non-saturated outputs with high probability.
 *
 * @param[in] lhs_q_info Quantization information of the Lhs matrix.
 * @param[in] rhs_q_info Quantization information of the Rhs matrix.
 * @param[in] m          Number of rows of the Lhs matrix.
 * @param[in] n          Number of columns of the Rhs matrix.
 * @param[in] k          Number of columns of the Lhs matrix / rows of the Rhs matrix.
 * @param[in] data_type  Data type of the matrices.
 *
 * @return A suitable output quantization information.
 */
QuantizationInfo calculate_mat_mul_dst_q_info(const QuantizationInfo &lhs_q_info, const QuantizationInfo &rhs_q_info, int m, int n, int k, DataType data_type);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100258} // namespace validation
259} // namespace test
260} // namespace arm_compute
Gunes Bayir9d0c4de2023-04-13 18:22:58 +0100261#endif /* ACL_TESTS_VALIDATION_HELPERS */