//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "DelegateOptionsTestHelper.hpp"
#include <common/include/ProfilingGuid.hpp>
#include <armnnUtils/Filesystem.hpp>
#include <armnn/utility/IgnoreUnused.hpp> // for armnn::IgnoreUnused in the debug-callback test

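// Unit tests for armnnDelegate::DelegateOptions. Judging by the test data,
// DelegateOptionTest runs a small network (the two inputs are added, then
// divided element-wise by a constant) on the given backends with the given
// options, and each test verifies an option's observable side effect - most
// often by redirecting std::cout and searching the captured output.
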
namespace armnnDelegate
{

TEST_SUITE("DelegateOptions")
{

TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16")
{
    std::stringstream ss;
    {
        StreamRedirector redirect(std::cout, ss.rdbuf());

        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
        std::vector<float> inputData = { 1, 2, 3, 4 };
        std::vector<float> divData = { 2, 2, 3, 4 };
        std::vector<float> expectedResult = { 1, 2, 2, 2 };

        // Enable ReduceFp32ToFp16
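        // (The positional OptimizerOptions flags are assumed to be
        // reduceFp32ToFp16, debug, reduceFp32ToBf16 and importEnabled; debug
        // is enabled as well so that the inserted conversion layers show up
        // on std::cout for the CHECKs below.)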
        armnn::OptimizerOptions optimizerOptions(true, true, false, false);
        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                                  backends,
                                  tensorShape,
                                  inputData,
                                  inputData,
                                  divData,
                                  expectedResult,
                                  delegateOptions);
    }
    // ReduceFp32ToFp16 option is enabled
    CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
    CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug")
{
    std::stringstream ss;
    {
        StreamRedirector redirect(std::cout, ss.rdbuf());

        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
        std::vector<float> inputData = { 1, 2, 3, 4 };
        std::vector<float> divData = { 2, 2, 3, 4 };
        std::vector<float> expectedResult = { 1, 2, 2, 2 };

        // Enable Debug
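        // (With no callback registered, the debug layers print each layer's
        // output - guid, name, output slot index, shape and data - to
        // std::cout, which is what the CHECKs below search for.)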
        armnn::OptimizerOptions optimizerOptions(false, true, false, false);
        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                                  backends,
                                  tensorShape,
                                  inputData,
                                  inputData,
                                  divData,
                                  expectedResult,
                                  delegateOptions);
    }
    // Debug option triggered.
    CHECK(ss.str().find("layerGuid") != std::string::npos);
    CHECK(ss.str().find("layerName") != std::string::npos);
    CHECK(ss.str().find("outputSlot") != std::string::npos);
    CHECK(ss.str().find("shape") != std::string::npos);
    CHECK(ss.str().find("data") != std::string::npos);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 1, 2, 3, 4 };
    std::vector<float> divData = { 2, 2, 3, 4 };
    std::vector<float> expectedResult = { 1, 2, 2, 2 };

    // Enable debug with debug callback function
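    // (When a DebugCallbackFunction is supplied, the per-layer debug data is
    // expected to be routed to the callback instead of being printed to
    // std::cout as in the previous test.)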
    armnn::OptimizerOptions optimizerOptions(false, true, false, false);
    bool callback = false;
    auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor)
    {
        armnn::IgnoreUnused(guid);
        armnn::IgnoreUnused(slotIndex);
        armnn::IgnoreUnused(tensor);
        callback = true;
    };

    armnn::INetworkProperties networkProperties(false, armnn::MemorySource::Undefined, armnn::MemorySource::Undefined);
    armnnDelegate::DelegateOptions delegateOptions(backends,
                                                   optimizerOptions,
                                                   armnn::EmptyOptional(),
                                                   armnn::Optional<armnn::DebugCallbackFunction>(mockCallback));

    CHECK(!callback);

    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              backends,
                              tensorShape,
                              inputData,
                              inputData,
                              divData,
                              expectedResult,
                              delegateOptions);

    // Check that the debug callback function was called.
    CHECK(callback);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToBf16")
{
    std::stringstream ss;
    {
        StreamRedirector redirect(std::cout, ss.rdbuf());

        ReduceFp32ToBf16TestImpl();
    }

    // ReduceFp32ToBf16 option is enabled
    CHECK(ss.str().find("convert_fp32_to_bf16") != std::string::npos);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<uint8_t> inputData = { 1, 2, 3, 4 };
    std::vector<uint8_t> divData = { 2, 2, 3, 4 };
    std::vector<uint8_t> expectedResult = { 1, 2, 2, 2 };

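    // Enable import (assumed to be the fourth positional flag, importEnabled,
    // which requests zero-copy import of input/output buffers where the
    // backend supports it).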
    armnn::OptimizerOptions optimizerOptions(false, false, false, true);
    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

    DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
                                backends,
                                tensorShape,
                                inputData,
                                inputData,
                                divData,
                                expectedResult,
                                delegateOptions);
}

TEST_CASE ("ArmnnDelegateStringParsingOptionDisableTfLiteRuntimeFallback")
{
    std::stringstream stringStream;
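    // "disable-tflite-runtime-fallback" is set to "1": unsupported operators
    // should make delegation fail outright rather than fall back to the
    // TfLite runtime, producing the error message checked at the end of this
    // test.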
    std::vector<std::string> keys { "backends", "debug-data", "disable-tflite-runtime-fallback" };
    std::vector<std::string> values { "CpuRef", "1", "1" };

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
    std::vector<float> expectedResult = { 1.0f, -2.0f, 3.0f, -4.0f };

    // Create the options_keys and options_values char arrays.
    // Note: the array form of std::unique_ptr is required here; pairing the
    // non-array form with new[] would call delete instead of delete[], which
    // is undefined behaviour.
    size_t num_options = keys.size();
    std::unique_ptr<const char*[]> options_keys(new const char*[num_options + 1]);
    std::unique_ptr<const char*[]> options_values(new const char*[num_options + 1]);
    for (size_t i = 0; i < num_options; ++i)
    {
        options_keys[i] = keys[i].c_str();
        options_values[i] = values[i].c_str();
    }

    StreamRedirector redirect(std::cout, stringStream.rdbuf());

    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
    DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
                                        backends,
                                        tensorShape,
                                        inputData,
                                        expectedResult,
                                        delegateOptions);
    CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
          != std::string::npos);
}

TEST_CASE ("ArmnnDelegateStringParsingOptionEnableTfLiteRuntimeFallback")
{
    std::stringstream stringStream;
    std::vector<std::string> keys { "backends", "debug-data", "disable-tflite-runtime-fallback" };
    std::vector<std::string> values { "CpuRef", "1", "0" };

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
    std::vector<float> expectedResult = { 1.0f, -2.0f, 3.0f, -4.0f };

    // Create the options_keys and options_values char arrays (array form of
    // std::unique_ptr again, so that delete[] is used).
    size_t num_options = keys.size();
    std::unique_ptr<const char*[]> options_keys(new const char*[num_options + 1]);
    std::unique_ptr<const char*[]> options_values(new const char*[num_options + 1]);
    for (size_t i = 0; i < num_options; ++i)
    {
        options_keys[i] = keys[i].c_str();
        options_values[i] = values[i].c_str();
    }

    StreamRedirector redirect(std::cout, stringStream.rdbuf());

    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
    DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
                                        backends,
                                        tensorShape,
                                        inputData,
                                        expectedResult,
                                        delegateOptions);

    CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
          == std::string::npos);
}
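
// For reference, a minimal sketch of how an application would typically hand
// such string-parsed options to TfLite (assuming the public
// TfLiteArmnnDelegateCreate/TfLiteArmnnDelegateDelete API; error handling
// omitted):
//
//     armnnDelegate::DelegateOptions delegateOptions(options_keys.get(),
//                                                    options_values.get(),
//                                                    num_options, nullptr);
//     std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
//         theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
//                          armnnDelegate::TfLiteArmnnDelegateDelete);
//     interpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());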

}

TEST_SUITE("DelegateOptions_CpuAccTests")
{

TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 1, 2, 3, 4 };
    std::vector<float> divData = { 2, 2, 3, 4 };
    std::vector<float> expectedResult = { 1, 2, 2, 2 };

    unsigned int numberOfThreads = 2;

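    // Pass backend-specific options to CpuAcc: "FastMathEnabled" and
    // "NumberOfThreads" are options understood by the Arm Compute (Neon)
    // backend.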
    armnn::ModelOptions modelOptions;
    armnn::BackendOptions cpuAcc("CpuAcc",
                                 {
                                     { "FastMathEnabled", true },
                                     { "NumberOfThreads", numberOfThreads }
                                 });
    modelOptions.push_back(cpuAcc);

    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              backends,
                              tensorShape,
                              inputData,
                              inputData,
                              divData,
                              expectedResult,
                              delegateOptions);
}

TEST_CASE ("ArmnnDelegateSerializeToDot")
{
    const fs::path filename(fs::temp_directory_path() / "ArmnnDelegateSerializeToDot.dot");
    if (fs::exists(filename))
    {
        fs::remove(filename);
    }
    std::stringstream ss;
    {
        StreamRedirector redirect(std::cout, ss.rdbuf());

        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
        std::vector<float> inputData = { 1, 2, 3, 4 };
        std::vector<float> divData = { 2, 2, 3, 4 };
        std::vector<float> expectedResult = { 1, 2, 2, 2 };

        armnn::OptimizerOptions optimizerOptions(false, false, false, false);
        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
        // Enable serialize to dot by specifying the target file name.
        delegateOptions.SetSerializeToDot(filename);
        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                                  backends,
                                  tensorShape,
                                  inputData,
                                  inputData,
                                  divData,
                                  expectedResult,
                                  delegateOptions);
    }
    CHECK(fs::exists(filename));
    // The file should have a size greater than 0 bytes.
    CHECK(fs::file_size(filename) > 0);
    // Clean up.
    fs::remove(filename);
}

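// Helper for the ReduceFp32ToFp16 string-parsing subcases below. It builds
// DelegateOptions from raw key/value C-string arrays and runs the standard
// option test, capturing std::cout in the caller-supplied stream.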
void CreateFp16StringParsingTestRun(std::vector<std::string>& keys,
                                    std::vector<std::string>& values,
                                    std::stringstream& ss)
{
    StreamRedirector redirect(std::cout, ss.rdbuf());

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 1, 2, 3, 4 };
    std::vector<float> divData = { 2, 2, 3, 4 };
    std::vector<float> expectedResult = { 1, 2, 2, 2 };

    // Create the options_keys and options_values char arrays (array form of
    // std::unique_ptr, as above, so that delete[] is used).
    size_t num_options = keys.size();
    std::unique_ptr<const char*[]> options_keys(new const char*[num_options + 1]);
    std::unique_ptr<const char*[]> options_values(new const char*[num_options + 1]);
    for (size_t i = 0; i < num_options; ++i)
    {
        options_keys[i] = keys[i].c_str();
        options_values[i] = values[i].c_str();
    }

    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              backends,
                              tensorShape,
                              inputData,
                              inputData,
                              divData,
                              expectedResult,
                              delegateOptions);
}

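// The subcases below exercise DelegateOptions' string-to-bool parsing of
// "reduce-fp32-to-fp16": "1", "true", "True" and "TRUE" should all enable the
// option, while "0" and "false" should leave it disabled.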
TEST_CASE ("ArmnnDelegateStringParsingOptionReduceFp32ToFp16")
{
    SUBCASE("Fp16=1")
    {
        std::stringstream ss;
        std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16", "logging-severity" };
        std::vector<std::string> values { "CpuRef", "1", "1", "info" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
    }
    SUBCASE("Fp16=true")
    {
        std::stringstream ss;
        std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16" };
        std::vector<std::string> values { "CpuRef", "TRUE", "true" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
    }
    SUBCASE("Fp16=True")
    {
        std::stringstream ss;
        std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16" };
        std::vector<std::string> values { "CpuRef", "true", "True" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
    }
    SUBCASE("Fp16=0")
    {
        std::stringstream ss;
        std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16" };
        std::vector<std::string> values { "CpuRef", "true", "0" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
    }
    SUBCASE("Fp16=false")
    {
        std::stringstream ss;
        std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16" };
        std::vector<std::string> values { "CpuRef", "1", "false" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
    }
}

}

} // namespace armnnDelegate