//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "DelegateOptionsTestHelper.hpp"
#include <common/include/ProfilingGuid.hpp>
#include <armnnUtils/Filesystem.hpp>

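// Unit tests for armnnDelegate::DelegateOptions, covering optimizer options
// (ReduceFp32ToFp16, Debug, debug callbacks, memory import, backend ModelOptions),
// parsing of string key/value options, and serialization of the optimized graph to a dot file.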
namespace armnnDelegate
{

TEST_SUITE("DelegateOptions")
{

TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16")
{
    std::stringstream ss;
    {
        StreamRedirector redirect(std::cout, ss.rdbuf());

        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
        std::vector<float> inputData = { 1, 2, 3, 4 };
        std::vector<float> divData = { 2, 2, 3, 4 };
        std::vector<float> expectedResult = { 1, 2, 2, 2 };

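        // Note: OptimizerOptions is configured via positional bool flags; the first four are
        // assumed to correspond to (reduceFp32ToFp16, debug, reduceFp32ToBf16, importEnabled).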
        // Enable ReduceFp32ToFp16 (Debug is also enabled so that the optimized layers are printed to std::cout)
        armnn::OptimizerOptions optimizerOptions(true, true, false, false);
        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                                  backends,
                                  tensorShape,
                                  inputData,
                                  inputData,
                                  divData,
                                  expectedResult,
                                  delegateOptions);
    }
    // With ReduceFp32ToFp16 enabled, the inserted FP32<->FP16 conversion layers appear in the debug output.
    CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
    CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug")
{
    std::stringstream ss;
    {
        StreamRedirector redirect(std::cout, ss.rdbuf());

        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
        std::vector<float> inputData = { 1, 2, 3, 4 };
        std::vector<float> divData = { 2, 2, 3, 4 };
        std::vector<float> expectedResult = { 1, 2, 2, 2 };

        // Enable Debug
        armnn::OptimizerOptions optimizerOptions(false, true, false, false);
        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                                  backends,
                                  tensorShape,
                                  inputData,
                                  inputData,
                                  divData,
                                  expectedResult,
                                  delegateOptions);
    }
    // The Debug option should have printed tensor information for each layer.
    CHECK(ss.str().find("layerGuid") != std::string::npos);
    CHECK(ss.str().find("layerName") != std::string::npos);
    CHECK(ss.str().find("outputSlot") != std::string::npos);
    CHECK(ss.str().find("shape") != std::string::npos);
    CHECK(ss.str().find("data") != std::string::npos);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 1, 2, 3, 4 };
    std::vector<float> divData = { 2, 2, 3, 4 };
    std::vector<float> expectedResult = { 1, 2, 2, 2 };

    // Enable Debug and register a debug callback function
    armnn::OptimizerOptions optimizerOptions(false, true, false, false);
    bool callback = false;
    auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor)
    {
        armnn::IgnoreUnused(guid);
        armnn::IgnoreUnused(slotIndex);
        armnn::IgnoreUnused(tensor);
        callback = true;
    };

    armnn::INetworkProperties networkProperties(false, armnn::MemorySource::Undefined, armnn::MemorySource::Undefined);
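    // Pass the callback through DelegateOptions; the EmptyOptional() argument is assumed to be the
    // optional logging severity, and the callback fills the optional DebugCallbackFunction slot.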
    armnnDelegate::DelegateOptions delegateOptions(backends,
                                                   optimizerOptions,
                                                   armnn::EmptyOptional(),
                                                   armnn::Optional<armnn::DebugCallbackFunction>(mockCallback));

    CHECK(!callback);

    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              backends,
                              tensorShape,
                              inputData,
                              inputData,
                              divData,
                              expectedResult,
                              delegateOptions);

    // Check that the debug callback function was called.
    CHECK(callback);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<uint8_t> inputData = { 1, 2, 3, 4 };
    std::vector<uint8_t> divData = { 2, 2, 3, 4 };
    std::vector<uint8_t> expectedResult = { 1, 2, 2, 2 };

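    // Enable import of input/output tensor memory (assumed to be the fourth positional flag,
    // matching the test name).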
    armnn::OptimizerOptions optimizerOptions(false, false, false, true);
    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

    DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
                                backends,
                                tensorShape,
                                inputData,
                                inputData,
                                divData,
                                expectedResult,
                                delegateOptions);
}

TEST_CASE ("ArmnnDelegateStringParsingOptionDisableTfLiteRuntimeFallback")
{
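    // The helper model is assumed to contain at least one operator the Arm NN delegate does not
    // support, so with "disable-tflite-runtime-fallback" set to 1 the delegate should report the
    // unsupported operator(s) rather than falling back to the TfLite runtime.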
    std::stringstream stringStream;
    std::vector<std::string> keys   { "backends", "debug-data", "disable-tflite-runtime-fallback" };
    std::vector<std::string> values { "CpuRef", "1", "1" };

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
    std::vector<float> expectedResult = { 1.0f, -2.0f, 3.0f, -4.0f };

    // Create options_keys and options_values char arrays
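    // DelegateOptions can be built directly from parallel key/value C-string arrays, the same
    // form in which options are typically passed through the TfLite external delegate interface.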
    size_t num_options = keys.size();
    std::unique_ptr<const char*> options_keys =
            std::unique_ptr<const char*>(new const char*[num_options + 1]);
    std::unique_ptr<const char*> options_values =
            std::unique_ptr<const char*>(new const char*[num_options + 1]);
    for (size_t i = 0; i < num_options; ++i)
    {
        options_keys.get()[i] = keys[i].c_str();
        options_values.get()[i] = values[i].c_str();
    }

    StreamRedirector redirect(std::cout, stringStream.rdbuf());

    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
    DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
                                        backends,
                                        tensorShape,
                                        inputData,
                                        expectedResult,
                                        delegateOptions);
    CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
          != std::string::npos);
}

TEST_CASE ("ArmnnDelegateStringParsingOptionEnableTfLiteRuntimeFallback")
{
    std::stringstream stringStream;
    std::vector<std::string> keys   { "backends", "debug-data", "disable-tflite-runtime-fallback" };
    std::vector<std::string> values { "CpuRef", "1", "0" };

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
    std::vector<float> expectedResult = { 1.0f, -2.0f, 3.0f, -4.0f };

    // Create options_keys and options_values char arrays
    size_t num_options = keys.size();
    std::unique_ptr<const char*> options_keys =
            std::unique_ptr<const char*>(new const char*[num_options + 1]);
    std::unique_ptr<const char*> options_values =
            std::unique_ptr<const char*>(new const char*[num_options + 1]);
    for (size_t i = 0; i < num_options; ++i)
    {
        options_keys.get()[i] = keys[i].c_str();
        options_values.get()[i] = values[i].c_str();
    }

    StreamRedirector redirect(std::cout, stringStream.rdbuf());

    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
    DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
                                        backends,
                                        tensorShape,
                                        inputData,
                                        expectedResult,
                                        delegateOptions);

    CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
          == std::string::npos);
}

}

TEST_SUITE("DelegateOptions_CpuAccTests")
{

TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 1, 2, 3, 4 };
    std::vector<float> divData = { 2, 2, 3, 4 };
    std::vector<float> expectedResult = { 1, 2, 2, 2 };

    unsigned int numberOfThreads = 2;

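    // Forward backend-specific options (fast math and the number of threads) to the CpuAcc
    // backend through armnn::ModelOptions.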
    armnn::ModelOptions modelOptions;
    armnn::BackendOptions cpuAcc("CpuAcc",
                                 {
                                     { "FastMathEnabled", true },
                                     { "NumberOfThreads", numberOfThreads }
                                 });
    modelOptions.push_back(cpuAcc);

    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);

    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              backends,
                              tensorShape,
                              inputData,
                              inputData,
                              divData,
                              expectedResult,
                              delegateOptions);
}

TEST_CASE ("ArmnnDelegateSerializeToDot")
{
    const fs::path filename(fs::temp_directory_path() / "ArmnnDelegateSerializeToDot.dot");
    if (fs::exists(filename))
    {
        fs::remove(filename);
    }
    std::stringstream ss;
    {
        StreamRedirector redirect(std::cout, ss.rdbuf());

        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
        std::vector<float> inputData = { 1, 2, 3, 4 };
        std::vector<float> divData = { 2, 2, 3, 4 };
        std::vector<float> expectedResult = { 1, 2, 2, 2 };

        armnn::OptimizerOptions optimizerOptions(false, false, false, false);
        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
        // Enable serialization to dot by specifying the target file name.
        delegateOptions.SetSerializeToDot(filename);
        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                                  backends,
                                  tensorShape,
                                  inputData,
                                  inputData,
                                  divData,
                                  expectedResult,
                                  delegateOptions);
    }
    CHECK(fs::exists(filename));
    // The file should have a size greater than 0 bytes.
    CHECK(fs::file_size(filename) > 0);
    // Clean up.
    fs::remove(filename);
}

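// Helper for the FP16 string-parsing subcases below: builds DelegateOptions from raw key/value
// string arrays, runs the standard option test, and captures std::cout in the supplied stream.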
void CreateFp16StringParsingTestRun(std::vector<std::string>& keys,
                                    std::vector<std::string>& values,
                                    std::stringstream& ss)
{
    StreamRedirector redirect(std::cout, ss.rdbuf());

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
    std::vector<float> inputData = { 1, 2, 3, 4 };
    std::vector<float> divData = { 2, 2, 3, 4 };
    std::vector<float> expectedResult = { 1, 2, 2, 2 };

    // Create options_keys and options_values char arrays
    size_t num_options = keys.size();
    std::unique_ptr<const char*> options_keys =
            std::unique_ptr<const char*>(new const char*[num_options + 1]);
    std::unique_ptr<const char*> options_values =
            std::unique_ptr<const char*>(new const char*[num_options + 1]);
    for (size_t i = 0; i < num_options; ++i)
    {
        options_keys.get()[i] = keys[i].c_str();
        options_values.get()[i] = values[i].c_str();
    }

    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                              backends,
                              tensorShape,
                              inputData,
                              inputData,
                              divData,
                              expectedResult,
                              delegateOptions);
}

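// Checks that both numeric ("1"/"0") and textual ("true"/"TRUE"/"false") values are accepted
// for the boolean "reduce-fp32-to-fp16" option.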
TEST_CASE ("ArmnnDelegateStringParsingOptionReduceFp32ToFp16")
{
    SUBCASE("Fp16=1")
    {
        std::stringstream ss;
        std::vector<std::string> keys   { "backends", "debug-data", "reduce-fp32-to-fp16", "logging-severity" };
        std::vector<std::string> values { "CpuRef", "1", "1", "info" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
    }
    SUBCASE("Fp16=true")
    {
        std::stringstream ss;
        std::vector<std::string> keys   { "backends", "debug-data", "reduce-fp32-to-fp16" };
        std::vector<std::string> values { "CpuRef", "TRUE", "true" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
    }
    SUBCASE("Fp16=True")
    {
        std::stringstream ss;
        std::vector<std::string> keys   { "backends", "debug-data", "reduce-fp32-to-fp16" };
        std::vector<std::string> values { "CpuRef", "true", "True" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
    }
    SUBCASE("Fp16=0")
    {
        std::stringstream ss;
        std::vector<std::string> keys   { "backends", "debug-data", "reduce-fp32-to-fp16" };
        std::vector<std::string> values { "CpuRef", "true", "0" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
    }
    SUBCASE("Fp16=false")
    {
        std::stringstream ss;
        std::vector<std::string> keys   { "backends", "debug-data", "reduce-fp32-to-fp16" };
        std::vector<std::string> values { "CpuRef", "1", "false" };
        CreateFp16StringParsingTestRun(keys, values, ss);
        CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
        CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
    }
}

}

} // namespace armnnDelegate