/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/functions/CLComputeAllAnchors.h"
#include "arm_compute/runtime/CL/functions/CLGenerateProposalsLayer.h"
#include "arm_compute/runtime/CL/functions/CLSlice.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/CLArrayAccessor.h"
#include "tests/Globals.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ComputeAllAnchorsFixture.h"
#include "utils/TypePrinter.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
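// Copies a flat host vector straight into a tensor buffer. Assumes the destination
// is already allocated (and mapped, when accessed through CLAccessor) and holds at
// least v.size() elements of type T.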
template <typename U, typename T>
inline void fill_tensor(U &&tensor, const std::vector<T> &v)
{
    std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
}

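// Feature map sizes and spatial scales exercised by the ComputeAllAnchors fixture tests below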
const auto ComputeAllInfoDataset = framework::dataset::make("ComputeAllInfo",
{
    ComputeAnchorsInfo(10U, 10U, 1. / 16.f),
    ComputeAnchorsInfo(100U, 1U, 1. / 2.f),
    ComputeAnchorsInfo(100U, 1U, 1. / 4.f),
    ComputeAnchorsInfo(100U, 100U, 1. / 4.f),
});
} // namespace

TEST_SUITE(CL)
TEST_SUITE(GenerateProposals)

// *INDENT-OFF*
// clang-format off
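// Static validation of CLGenerateProposalsLayer::validate(): the first entry of each dataset
// column forms a valid configuration, the remaining four each violate one constraint (see the
// per-row comments); "Expected" records whether validate() should succeed.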
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
        framework::dataset::make("scores", { TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16),    // Mismatching data types
                                             TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16),    // Wrong deltas (number of transformation values not a multiple of 4)
                                             TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16),    // Wrong anchors (number of values per roi != 4)
                                             TensorInfo(TensorShape(100U, 100U, 9U), 1, DataType::F16)}),  // Output tensor num_valid_proposals not scalar
        framework::dataset::make("deltas", { TensorInfo(TensorShape(100U, 100U, 36U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U, 100U, 36U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32),
                                             TensorInfo(TensorShape(100U, 100U, 38U), 1, DataType::F32)})),
        framework::dataset::make("anchors", { TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
                                              TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
                                              TensorInfo(TensorShape(4U, 9U), 1, DataType::F32),
                                              TensorInfo(TensorShape(5U, 9U), 1, DataType::F32),
                                              TensorInfo(TensorShape(4U, 9U), 1, DataType::F32)})),
        framework::dataset::make("proposals", { TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                                TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                                TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                                TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32),
                                                TensorInfo(TensorShape(5U, 100U*100U*9U), 1, DataType::F32)})),
        framework::dataset::make("scores_out", { TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32),
                                                 TensorInfo(TensorShape(100U*100U*9U), 1, DataType::F32)})),
        framework::dataset::make("num_valid_proposals", { TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
                                                          TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
                                                          TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
                                                          TensorInfo(TensorShape(1U, 1U), 1, DataType::U32),
                                                          TensorInfo(TensorShape(1U, 10U), 1, DataType::U32)})),
        framework::dataset::make("generate_proposals_info", { GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                              GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                              GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                              GenerateProposalsInfo(10.f, 10.f, 1.f),
                                                              GenerateProposalsInfo(10.f, 10.f, 1.f)})),
        framework::dataset::make("Expected", { true, false, false, false, false })),
        scores, deltas, anchors, proposals, scores_out, num_valid_proposals, generate_proposals_info, expected)
{
    ARM_COMPUTE_EXPECT(bool(CLGenerateProposalsLayer::validate(&scores.clone()->set_is_resizable(true),
                                                               &deltas.clone()->set_is_resizable(true),
                                                               &anchors.clone()->set_is_resizable(true),
                                                               &proposals.clone()->set_is_resizable(true),
                                                               &scores_out.clone()->set_is_resizable(true),
                                                               &num_valid_proposals.clone()->set_is_resizable(true),
                                                               generate_proposals_info)) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*

template <typename T>
using CLComputeAllAnchorsFixture = ComputeAllAnchorsFixture<CLTensor, CLAccessor, CLComputeAllAnchors, T>;

TEST_SUITE(Float)
TEST_SUITE(FP32)
DATA_TEST_CASE(IntegrationTestCaseAllAnchors, framework::DatasetMode::ALL, framework::dataset::make("DataType", { DataType::F32 }),
               data_type)
{
    const int values_per_roi = 4;
    const int num_anchors    = 3;
    const int feature_height = 4;
    const int feature_width  = 3;

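    // Reference output: the 3 base anchors replicated over every cell of the 3x4 feature map
    // (feature_width * feature_height * num_anchors = 36 rois of 4 coordinates each), shifted
    // by the 16-pixel stride implied by the 1/16 spatial scale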
    SimpleTensor<float> anchors_expected(TensorShape(values_per_roi, feature_width * feature_height * num_anchors), DataType::F32);
    fill_tensor(anchors_expected, std::vector<float> { -38, -16, 53, 31, -84, -40, 99, 55, -176, -88, 191, 103,
                                                       -22, -16, 69, 31, -68, -40, 115, 55, -160, -88, 207, 103,
                                                       -6, -16, 85, 31, -52, -40, 131, 55, -144, -88, 223, 103,
                                                       -38, 0, 53, 47, -84, -24, 99, 71, -176, -72, 191, 119,
                                                       -22, 0, 69, 47, -68, -24, 115, 71, -160, -72, 207, 119,
                                                       -6, 0, 85, 47, -52, -24, 131, 71, -144, -72, 223, 119,
                                                       -38, 16, 53, 63, -84, -8, 99, 87, -176, -56, 191, 135,
                                                       -22, 16, 69, 63, -68, -8, 115, 87, -160, -56, 207, 135,
                                                       -6, 16, 85, 63, -52, -8, 131, 87, -144, -56, 223, 135,
                                                       -38, 32, 53, 79, -84, 8, 99, 103, -176, -40, 191, 151,
                                                       -22, 32, 69, 79, -68, 8, 115, 103, -160, -40, 207, 151,
                                                       -6, 32, 85, 79, -52, 8, 131, 103, -144, -40, 223, 151
                                                     });

    CLTensor all_anchors;
    CLTensor anchors = create_tensor<CLTensor>(TensorShape(4, num_anchors), data_type);

    // Create and configure function
    CLComputeAllAnchors compute_anchors;
    compute_anchors.configure(&anchors, &all_anchors, ComputeAnchorsInfo(feature_width, feature_height, 1. / 16.0));
    anchors.allocator()->allocate();
    all_anchors.allocator()->allocate();

    fill_tensor(CLAccessor(anchors), std::vector<float> { -38, -16, 53, 31,
                                                          -84, -40, 99, 55,
                                                          -176, -88, 191, 103
                                                        });
    // Compute function
    compute_anchors.run();
    validate(CLAccessor(all_anchors), anchors_expected);
}

DATA_TEST_CASE(IntegrationTestCaseGenerateProposals, framework::DatasetMode::ALL, framework::dataset::make("DataType", { DataType::F32 }),
               data_type)
{
    const int values_per_roi = 4;
    const int num_anchors    = 2;
    const int feature_height = 4;
    const int feature_width  = 5;

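    // Objectness scores, one per anchor per feature map cell
    // (feature_width * feature_height * num_anchors = 5 * 4 * 2 = 40 values)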
    std::vector<float> scores_vector
    {
        5.44218998e-03f, 1.19207997e-03f, 1.12379994e-03f, 1.17181998e-03f,
        1.20544003e-03f, 6.17993006e-04f, 1.05261997e-05f, 8.91025957e-06f,
        9.29536981e-09f, 6.09605013e-05f, 4.72735002e-04f, 1.13482002e-10f,
        1.50015003e-05f, 4.45032993e-06f, 3.21612994e-08f, 8.02662980e-04f,
        1.40488002e-04f, 3.12508007e-07f, 3.02616991e-06f, 1.97759000e-08f,
        2.66913995e-02f, 5.26766013e-03f, 5.05053019e-03f, 5.62100019e-03f,
        5.37420018e-03f, 5.26280981e-03f, 2.48894998e-04f, 1.06842002e-04f,
        3.92931997e-06f, 1.79388002e-03f, 4.79440019e-03f, 3.41609990e-07f,
        5.20430971e-04f, 3.34090000e-05f, 2.19159006e-07f, 2.28786003e-03f,
        5.16703985e-05f, 4.04523007e-06f, 1.79227004e-06f, 5.32449000e-08f
    };

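    // Box regression deltas, values_per_roi entries per anchor per cell
    // (feature_width * feature_height * num_anchors * values_per_roi = 5 * 4 * 2 * 4 = 160 values)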
    std::vector<float> bbx_vector
    {
        -1.65040009e-02f, -1.84051003e-02f, -1.85930002e-02f, -2.08263006e-02f,
        -1.83814000e-02f, -2.89172009e-02f, -3.89706008e-02f, -7.52277970e-02f,
        -1.54091999e-01f, -2.55433004e-02f, -1.77490003e-02f, -1.10340998e-01f,
        -4.20190990e-02f, -2.71421000e-02f, 6.89801015e-03f, 5.71171008e-02f,
        -1.75665006e-01f, 2.30021998e-02f, 3.08554992e-02f, -1.39333997e-02f,
        3.40579003e-01f, 3.91070992e-01f, 3.91624004e-01f, 3.92527014e-01f,
        3.91445011e-01f, 3.79328012e-01f, 4.26631987e-01f, 3.64892989e-01f,
        2.76894987e-01f, 5.13985991e-01f, 3.79999995e-01f, 1.80457994e-01f,
        4.37402993e-01f, 4.18545991e-01f, 2.51549989e-01f, 4.48318988e-01f,
        1.68564007e-01f, 4.65440989e-01f, 4.21891987e-01f, 4.45928007e-01f,
        3.27155995e-03f, 3.71480011e-03f, 3.60032008e-03f, 4.27092984e-03f,
        3.74579988e-03f, 5.95752988e-03f, -3.14473989e-03f, 3.52022005e-03f,
        -1.88564006e-02f, 1.65188999e-03f, 1.73791999e-03f, -3.56074013e-02f,
        -1.66615995e-04f, 3.14146001e-03f, -1.11830998e-02f, -5.35363983e-03f,
        6.49790000e-03f, -9.27671045e-03f, -2.83346009e-02f, -1.61233004e-02f,
        -2.15505004e-01f, -2.19910994e-01f, -2.20872998e-01f, -2.12831005e-01f,
        -2.19145000e-01f, -2.27687001e-01f, -3.43973994e-01f, -2.75869995e-01f,
        -3.19516987e-01f, -2.50418007e-01f, -2.48537004e-01f, -5.08224010e-01f,
        -2.28724003e-01f, -2.82402009e-01f, -3.75815988e-01f, -2.86352992e-01f,
        -5.28333001e-02f, -4.43836004e-01f, -4.55134988e-01f, -4.34897989e-01f,
        -5.65053988e-03f, -9.25739005e-04f, -1.06790999e-03f, -2.37016007e-03f,
        -9.71166010e-04f, -8.90910998e-03f, -1.17592998e-02f, -2.08992008e-02f,
        -4.94231991e-02f, 6.63906988e-03f, 3.20469006e-03f, -6.44695014e-02f,
        -3.11607006e-03f, 2.02738005e-03f, 1.48096997e-02f, 4.39785011e-02f,
        -8.28424022e-02f, 3.62076014e-02f, 2.71668993e-02f, 1.38250999e-02f,
        6.76669031e-02f, 1.03252999e-01f, 1.03255004e-01f, 9.89722982e-02f,
        1.03646003e-01f, 4.79663983e-02f, 1.11014001e-01f, 9.31736007e-02f,
        1.15768999e-01f, 1.04014002e-01f, -8.90677981e-03f, 1.13103002e-01f,
        1.33085996e-01f, 1.25405997e-01f, 1.50051996e-01f, -1.13038003e-01f,
        7.01059997e-02f, 1.79651007e-01f, 1.41055003e-01f, 1.62841007e-01f,
        -1.00247003e-02f, -8.17587040e-03f, -8.32176022e-03f, -8.90108012e-03f,
        -8.13035015e-03f, -1.77263003e-02f, -3.69572006e-02f, -3.51580009e-02f,
        -5.92143014e-02f, -1.80795006e-02f, -5.46086021e-03f, -4.10550982e-02f,
        -1.83081999e-02f, -2.15411000e-02f, -1.17953997e-02f, 3.33894007e-02f,
        -5.29635996e-02f, -6.97528012e-03f, -3.15250992e-03f, -3.27355005e-02f,
        1.29676998e-01f, 1.16080999e-01f, 1.15947001e-01f, 1.21797003e-01f,
        1.16089001e-01f, 1.44875005e-01f, 1.15617000e-01f, 1.31586999e-01f,
        1.74735002e-02f, 1.21973999e-01f, 1.31596997e-01f, 2.48907991e-02f,
        6.18605018e-02f, 1.12855002e-01f, -6.99798986e-02f, 9.58312973e-02f,
        1.53593004e-01f, -8.75087008e-02f, -4.92327996e-02f, -3.32239009e-02f
    };

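    // Two base anchors given as (x1, y1, x2, y2) box corners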
    std::vector<float> anchors_vector{ -38, -16, 53, 31,
                                       -120, -120, 135, 135 };

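    // Expected proposals: each of the 9 rows holds 5 values (the leading batch/image index,
    // then the x1, y1, x2, y2 box corners), clipped to the 80x60 image and ordered by decreasing score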
    SimpleTensor<float> proposals_expected(TensorShape(5, 9), DataType::F32);
    fill_tensor(proposals_expected, std::vector<float> { 0, 0, 0, 79, 59,
                                                         0, 0, 5.0005703f, 52.63237f, 43.69501495f,
                                                         0, 24.13628387f, 7.51243401f, 79, 46.06628418f,
                                                         0, 0, 7.50924301f, 68.47792816f, 46.03357315f,
                                                         0, 0, 23.09477997f, 51.61448669f, 59,
                                                         0, 0, 39.52141571f, 52.44710541f, 59,
                                                         0, 23.57396317f, 29.98791885f, 79, 59,
                                                         0, 0, 41.90219116f, 79, 59,
                                                         0, 0, 23.30098343f, 79, 59
                                                       });

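    // Expected objectness scores of the retained proposals, highest first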
    SimpleTensor<float> scores_expected(TensorShape(9), DataType::F32);
    fill_tensor(scores_expected, std::vector<float>
    {
        2.66913995e-02f,
        5.44218998e-03f,
        1.20544003e-03f,
        1.19207997e-03f,
        6.17993006e-04f,
        4.72735002e-04f,
        6.09605013e-05f,
        1.50015003e-05f,
        8.91025957e-06f
    });

    // Inputs
    CLTensor scores      = create_tensor<CLTensor>(TensorShape(feature_width, feature_height, num_anchors), data_type);
    CLTensor bbox_deltas = create_tensor<CLTensor>(TensorShape(feature_width, feature_height, values_per_roi * num_anchors), data_type);
    CLTensor anchors     = create_tensor<CLTensor>(TensorShape(values_per_roi, num_anchors), data_type);

    // Outputs
    CLTensor proposals;
    CLTensor num_valid_proposals;
    CLTensor scores_out;
    num_valid_proposals.allocator()->init(TensorInfo(TensorShape(1), 1, DataType::F32));

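    // GenerateProposalsInfo arguments below are, in constructor order: image width/height (80x60),
    // image scale, spatial scale (1/16), pre-NMS and post-NMS proposal counts, NMS threshold
    // and minimum box size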
    CLGenerateProposalsLayer generate_proposals;
    generate_proposals.configure(&scores, &bbox_deltas, &anchors, &proposals, &scores_out, &num_valid_proposals,
                                 GenerateProposalsInfo(80, 60, 0.166667f, 1 / 16.0, 6000, 300, 0.7f, 16.0f));

    // Allocate memory for input/output tensors
    scores.allocator()->allocate();
    bbox_deltas.allocator()->allocate();
    anchors.allocator()->allocate();
    proposals.allocator()->allocate();
    num_valid_proposals.allocator()->allocate();
    scores_out.allocator()->allocate();

    // Fill inputs
    fill_tensor(CLAccessor(scores), scores_vector);
    fill_tensor(CLAccessor(bbox_deltas), bbx_vector);
    fill_tensor(CLAccessor(anchors), anchors_vector);

    // Run operator
    generate_proposals.run();

    // Gather num_valid_proposals
    num_valid_proposals.map();
    const float N = *reinterpret_cast<float *>(num_valid_proposals.ptr_to_element(Coordinates(0, 0)));
    num_valid_proposals.unmap();

    // Select the first N entries of the proposals
    CLTensor proposals_final;
    CLSlice  select_proposals;
    select_proposals.configure(&proposals, &proposals_final, Coordinates(0, 0), Coordinates(values_per_roi + 1, size_t(N)));
    proposals_final.allocator()->allocate();
    select_proposals.run();

    // Select the first N entries of the scores
    CLTensor scores_final;
    CLSlice  select_scores;
    select_scores.configure(&scores_out, &scores_final, Coordinates(0), Coordinates(size_t(N)));
    scores_final.allocator()->allocate();
    select_scores.run();

    // Validate the output
    validate(CLAccessor(proposals_final), proposals_expected);
    validate(CLAccessor(scores_final), scores_expected);
}

FIXTURE_DATA_TEST_CASE(ComputeAllAnchors, CLComputeAllAnchorsFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(framework::dataset::make("NumAnchors", { 2, 4, 8 }), ComputeAllInfoDataset), framework::dataset::make("DataType", { DataType::F32 })))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // FP32

TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(ComputeAllAnchors, CLComputeAllAnchorsFixture<half>, framework::DatasetMode::ALL,
                       combine(combine(framework::dataset::make("NumAnchors", { 2, 4, 8 }), ComputeAllInfoDataset), framework::dataset::make("DataType", { DataType::F16 })))
{
    // Validate output
    validate(CLAccessor(_target), _reference);
}
TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float

TEST_SUITE_END() // GenerateProposals
TEST_SUITE_END() // CL

} // namespace validation
} // namespace test
} // namespace arm_compute