/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ConvolutionLayer.h"
#include "tests/validation/reference/Utils.h"
#include "tests/validation/reference/Winograd.h"

#include <random>

namespace arm_compute
{
class NEWinogradLayer;

namespace test
{
namespace validation
{
using namespace arm_compute::misc::shape_calculator;

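/** Validation fixture for Winograd-based convolution layers.
 *
 * Configures and runs the templated FunctionType (e.g. NEWinogradLayer) on randomly
 * filled F32 tensors and computes the expected result with the plain reference
 * convolution, so that a test case can compare _target against _reference.
 *
 * Illustrative instantiation (the aliases live in the backend test suites and may differ):
 *   using NEWinogradLayerFixture = WinogradLayerValidationFixture<Tensor, Accessor, NEWinogradLayer, float>;
 */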
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class WinogradLayerValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info)
    {
        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info);
    }

protected:
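    /** Fill a tensor with uniformly distributed random values in the range [min, max).
     *
     * @p i is the seed offset passed to the assets library. Only F32 tensors are handled here;
     * any other data type triggers ARM_COMPUTE_ERROR.
     */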
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

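    /** Create, configure, allocate and fill the F32 input/weights/bias/output tensors, run the
     *  target Winograd convolution function and return its output tensor.
     */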
    TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info)
    {
        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, DataType::F32, 1);
        TensorType weights = create_tensor<TensorType>(weights_shape, DataType::F32, 1);
        TensorType bias    = create_tensor<TensorType>(bias_shape, DataType::F32, 1);
        TensorType dst     = create_tensor<TensorType>(output_shape, DataType::F32, 1);

        // Create and configure function
        FunctionType conv;
        conv.configure(&src, &weights, &bias, &dst, info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        dst.allocator()->allocate();
        bias.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);
        fill(AccessorType(weights), 1, -1.f, 1.f);
        fill(AccessorType(bias), 2, -1.f, 1.f);
        fill(AccessorType(dst), 3, -1.f, 1.f);

        // Compute NEWinogradLayer function
        conv.run();

        return dst;
    }

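    /** Compute the expected output with the plain reference convolution layer, using tensors
     *  filled with the same seed offsets as the target so both paths see identical data.
     */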
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, DataType::F32, 1 };
        SimpleTensor<T> weights{ weights_shape, DataType::F32, 1 };
        SimpleTensor<T> bias{ bias_shape, DataType::F32, 1 };

        // Fill reference
        fill(src, 0, -1.f, 1.f);
        fill(weights, 1, -1.f, 1.f);
        fill(bias, 2, -1.f, 1.f);

        return reference::convolution_layer<T>(src, weights, bias, output_shape, info);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
    int             _fractional_bits{};
    DataType        _data_type{};
};

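/** Validation fixture for the Winograd input transform stage.
 *
 * The expected output shape is derived with compute_winograd_input_transform_shape() and the
 * result of the templated transform function is compared against
 * reference::winograd_input_transform().
 */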
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class WinogradInputTransformValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, PadStrideInfo conv_info, Size2D kernel_dims, bool is_nchw_format, DataType data_type)
    {
        TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), conv_info, kernel_dims);

        _target    = compute_target(input_shape, output_shape, conv_info, kernel_dims, is_nchw_format, data_type);
        _reference = compute_reference(input_shape, output_shape, conv_info, kernel_dims, is_nchw_format, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, float min, float max)
    {
        switch(tensor.data_type())
        {
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(min, max);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Not supported");
                library->fill_tensor_uniform(tensor, i);
                break;
            }
        }
    }

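    /** Configure and run the input transform under test on a randomly filled source tensor and
     *  return the transformed output.
     */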
    TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims, bool is_nchw_format, DataType data_type)
    {
        ARM_COMPUTE_UNUSED(is_nchw_format);

        // Create tensors
        TensorType src = create_tensor<TensorType>(input_shape, data_type);
        TensorType dst = create_tensor<TensorType>(output_shape, data_type);

        // Create and configure function
        FunctionType transf;
        transf.configure(&src, &dst, conv_info, kernel_dims);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0, -1.f, 1.f);

        // Compute CLWinogradInputTransform function
        transf.run();

        return dst;
    }

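    /** Compute the expected transform output with the reference implementation on an identically
     *  seeded source tensor.
     */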
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims, bool is_nchw_format, DataType data_type)
    {
        ARM_COMPUTE_UNUSED(is_nchw_format);

        // Create reference
        SimpleTensor<T> src{ input_shape, data_type };

        // Fill reference
        fill(src, 0, -1.f, 1.f);

        return reference::winograd_input_transform<T>(src, output_shape, conv_info, kernel_dims);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */