/*
 * Copyright (c) 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef ACL_TESTS_VALIDATION_FIXTURES_ADDMULADDFIXTURE_H
#define ACL_TESTS_VALIDATION_FIXTURES_ADDMULADDFIXTURE_H

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"
#include "tests/validation/reference/ArithmeticOperations.h"
#include "tests/validation/reference/DequantizationLayer.h"
#include "tests/validation/reference/PixelWiseMultiplication.h"
#include "tests/validation/reference/QuantizationLayer.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
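/** Generic fixture for the fused AddMulAdd operator.
 *
 * The target computes final_output = act((input1 + input2) * bn_mul + bn_add),
 * optionally also exposing the intermediate addition result, as exercised by
 * compute_target() below.
 */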
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class AddMulAddGenericFixture : public framework::Fixture
{
public:
    void setup(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info, bool interm_out)
    {
        compute_target(shape, data_type, act_info, interm_out);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, DataType data_type)
    {
        switch(data_type)
        {
            case DataType::F32:
                library->fill_tensor_uniform(tensor, i, -10.f, 10.f);
                break;
            case DataType::F16:
                library->fill_tensor_uniform(tensor, i, -1.f, 1.f);
                break;
            default:
                library->fill_tensor_uniform(tensor, i);
                break;
        }
    }

    void compute_target(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info, bool interm_out)
    {
        TensorShape b_shape(shape.x());

        // Create tensors
        TensorType input1       = create_tensor<TensorType>(shape, data_type, 1, _input1_qinfo);
        TensorType input2       = create_tensor<TensorType>(shape, data_type, 1, _input2_qinfo);
        TensorType bn_mul       = create_tensor<TensorType>(b_shape, data_type, 1, _bn_mul_qinfo);
        TensorType bn_add       = create_tensor<TensorType>(b_shape, data_type, 1, _bn_add_qinfo);
        TensorType add_output   = create_tensor<TensorType>(shape, data_type, 1, _add_output_qinfo);
        TensorType final_output = create_tensor<TensorType>(shape, data_type, 1, _final_output_qinfo);

        // Create and configure function
        FunctionType add_mul_add;
        ARM_COMPUTE_ERROR_THROW_ON(add_mul_add.validate(input1.info(), input2.info(), bn_mul.info(),
                                                        bn_add.info(), interm_out ? add_output.info() : nullptr, final_output.info(),
                                                        ConvertPolicy::SATURATE, act_info));

        add_mul_add.configure(&input1, &input2, &bn_mul, &bn_add, interm_out ? &add_output : nullptr,
                              &final_output, ConvertPolicy::SATURATE, act_info);

        // Allocate tensors
        input1.allocator()->allocate();
        input2.allocator()->allocate();
        bn_mul.allocator()->allocate();
        bn_add.allocator()->allocate();

        if(interm_out)
        {
            add_output.allocator()->allocate();
        }

        final_output.allocator()->allocate();

        // Fill tensors
        fill(AccessorType(input1), 0, data_type);
        fill(AccessorType(input2), 1, data_type);
        fill(AccessorType(bn_mul), 2, data_type);
        fill(AccessorType(bn_add), 3, data_type);

        // Compute function
        add_mul_add.run();

        _target = std::move(final_output);

        if(interm_out)
        {
            _interm_target = std::move(add_output);
        }
    }

    TensorType      _target{};
    TensorType      _interm_target{};
    SimpleTensor<T> _reference{};
    SimpleTensor<T> _interm_reference{};

    QuantizationInfo _input1_qinfo{};
    QuantizationInfo _input2_qinfo{};
    QuantizationInfo _bn_mul_qinfo{};
    QuantizationInfo _bn_add_qinfo{};
    QuantizationInfo _add_output_qinfo{};
    QuantizationInfo _final_output_qinfo{};
};

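/** Fixture for validating the fused operator with floating-point data types against a float reference. */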
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool interm_out>
class AddMulAddFloatValidationFixture : public AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    using Parent = AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>;

    void setup(const TensorShape &shape, DataType data_type, ActivationLayerInfo act_info)
    {
        Parent::setup(shape, data_type, act_info, interm_out);
        compute_reference(shape, data_type, act_info);
    }

    // compute_reference() is kept out of the generic fixture because the quantized
    // variant is a very different implementation whose intermediate tensors are always
    // float. Keeping the two references separate makes the calculations more readable,
    // and the classes stay smaller because fill() and compute_target() are not repeated.
    void compute_reference(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info)
    {
        TensorShape b_shape(shape.x());

        // Create reference
        SimpleTensor<T> input1{ shape, data_type };
        SimpleTensor<T> input2{ shape, data_type };
        SimpleTensor<T> bn_mul{ b_shape, data_type };
        SimpleTensor<T> bn_add{ b_shape, data_type };
        SimpleTensor<T> add_output{ shape, data_type, 1 };

        SimpleTensor<T> bn_mul_out{ shape, data_type };
        SimpleTensor<T> bn_add_out{ shape, data_type };

        // Fill reference
        Parent::fill(input1, 0, data_type);
        Parent::fill(input2, 1, data_type);
        Parent::fill(bn_mul, 2, data_type);
        Parent::fill(bn_add, 3, data_type);

        reference::arithmetic_operation<T>(reference::ArithmeticOperation::ADD, input1, input2, add_output, ConvertPolicy::SATURATE);
        bn_mul_out = reference::pixel_wise_multiplication<T, T, T>(add_output, bn_mul, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP, data_type);
        reference::arithmetic_operation<T>(reference::ArithmeticOperation::ADD, bn_mul_out, bn_add, bn_add_out, ConvertPolicy::SATURATE);

        if(interm_out)
        {
            Parent::_interm_reference = std::move(add_output);
        }

        if(act_info.enabled() && act_info.activation() != ActivationLayerInfo::ActivationFunction::IDENTITY)
        {
            Parent::_reference = reference::activation_layer(bn_add_out, act_info);
        }
        else
        {
            Parent::_reference = std::move(bn_add_out);
        }
    }
};
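
// A minimal usage sketch (illustrative only; NEAddMulAdd and the dataset are
// placeholders following the usual test-suite pattern, assuming a Neon build):
//
//   using NEAddMulAddFloatFixture = AddMulAddFloatValidationFixture<Tensor, Accessor, NEAddMulAdd, float, /* interm_out */ false>;
//
//   FIXTURE_DATA_TEST_CASE(RunFloat, NEAddMulAddFloatFixture, framework::DatasetMode::ALL, dataset)
//   {
//       validate(Accessor(_target), _reference);
//   }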

template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool interm_out>
class AddMulAddQuantizedValidationFixture : public AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    using Parent = AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>;

    void setup(const TensorShape &shape, DataType data_type, ActivationLayerInfo act_info,
               QuantizationInfo input1_qinfo, QuantizationInfo input2_qinfo, QuantizationInfo bn_mul_qinfo,
               QuantizationInfo bn_add_qinfo, QuantizationInfo add_output_qinfo, QuantizationInfo final_output_qinfo)
    {
        // Quantization arguments are stored in class attributes to keep function declarations short
        Parent::_input1_qinfo       = input1_qinfo;
        Parent::_input2_qinfo       = input2_qinfo;
        Parent::_bn_mul_qinfo       = bn_mul_qinfo;
        Parent::_bn_add_qinfo       = bn_add_qinfo;
        Parent::_add_output_qinfo   = add_output_qinfo;
        Parent::_final_output_qinfo = final_output_qinfo;

        Parent::setup(shape, data_type, act_info, interm_out);
        compute_reference(shape, data_type, act_info);
    }

    // compute_reference() is kept out of the generic fixture because the quantized
    // variant is a very different implementation whose intermediate tensors are always
    // float. Keeping the two references separate makes the calculations more readable,
    // and the classes stay smaller because fill() and compute_target() are not repeated.
    void compute_reference(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info)
    {
        TensorShape b_shape(shape.x());

        // Create reference
        SimpleTensor<T> input1{ shape, data_type, 1, Parent::_input1_qinfo };
        SimpleTensor<T> input2{ shape, data_type, 1, Parent::_input2_qinfo };
        SimpleTensor<T> bn_mul{ b_shape, data_type, 1, Parent::_bn_mul_qinfo };
        SimpleTensor<T> bn_add{ b_shape, data_type, 1, Parent::_bn_add_qinfo };

        // Fill input tensors
        Parent::fill(input1, 0, data_type);
        Parent::fill(input2, 1, data_type);
        Parent::fill(bn_mul, 2, data_type);
        Parent::fill(bn_add, 3, data_type);

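        // Dequantize the inputs, run the reference arithmetic in float, and requantize
        // the results below so they can be compared against the quantized targets.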
        SimpleTensor<float> input1_dequantized = reference::dequantization_layer<float>(input1);
        SimpleTensor<float> input2_dequantized = reference::dequantization_layer<float>(input2);
        SimpleTensor<float> bn_mul_dequantized = reference::dequantization_layer<float>(bn_mul);
        SimpleTensor<float> bn_add_dequantized = reference::dequantization_layer<float>(bn_add);

        SimpleTensor<float> add_output_dequantized{ shape, DataType::F32 };
        SimpleTensor<float> bn_add_out_dequantized{ shape, DataType::F32 };

        reference::arithmetic_operation<float>(reference::ArithmeticOperation::ADD, input1_dequantized, input2_dequantized, add_output_dequantized, ConvertPolicy::SATURATE);
        SimpleTensor<float> bn_mul_out_dequantized = reference::pixel_wise_multiplication<float, float, float>(add_output_dequantized, bn_mul_dequantized, 1.f, ConvertPolicy::SATURATE,
                                                                                                               RoundingPolicy::TO_NEAREST_UP, DataType::F32);
        reference::arithmetic_operation<float>(reference::ArithmeticOperation::ADD, bn_mul_out_dequantized, bn_add_dequantized, bn_add_out_dequantized, ConvertPolicy::SATURATE);

        if(interm_out)
        {
            Parent::_interm_reference = reference::quantization_layer<float, T>(add_output_dequantized, data_type, Parent::_add_output_qinfo);
        }

        if(act_info.enabled() && act_info.activation() != ActivationLayerInfo::ActivationFunction::IDENTITY)
        {
            SimpleTensor<T> ref = reference::quantization_layer<float, T>(bn_add_out_dequantized, data_type, Parent::_final_output_qinfo);
            Parent::_reference  = reference::activation_layer(ref, act_info);
        }
        else
        {
            Parent::_reference = reference::quantization_layer<float, T>(bn_add_out_dequantized, data_type, Parent::_final_output_qinfo);
        }
    }
};
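
// A matching sketch for the quantized path (illustrative only; the function type
// and quantization values are placeholders):
//
//   using NEAddMulAddQuantizedFixture = AddMulAddQuantizedValidationFixture<Tensor, Accessor, NEAddMulAdd, uint8_t, /* interm_out */ false>;
//
// Its setup() takes one QuantizationInfo per tensor (e.g. QuantizationInfo(0.1f, 10))
// in addition to the shape, data type and activation information.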
} // namespace validation
} // namespace test
} // namespace arm_compute

#endif // ACL_TESTS_VALIDATION_FIXTURES_ADDMULADDFIXTURE_H