//
// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#include "DelegateOptionsTestHelper.hpp"
Nikhil Raj7dcc6972021-04-30 15:44:24 +01007#include <common/include/ProfilingGuid.hpp>
Colm Donelan3e32a872021-10-04 22:55:37 +01008#include <armnnUtils/Filesystem.hpp>
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +00009
Colm Donelan7bcae3c2024-01-22 10:07:14 +000010#include <doctest/doctest.h>
11
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +000012namespace armnnDelegate
13{
14
15TEST_SUITE("DelegateOptions")
16{
17
18TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16")
19{
20 std::stringstream ss;
21 {
22 StreamRedirector redirect(std::cout, ss.rdbuf());
23
24 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
25 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
26 std::vector<float> inputData = { 1, 2, 3, 4 };
27 std::vector<float> divData = { 2, 2, 3, 4 };
28 std::vector<float> expectedResult = { 1, 2, 2, 2 };
29
30 // Enable ReduceFp32ToFp16
John Mcloughlinc5ee0d72023-03-24 12:07:25 +000031 armnn::OptimizerOptionsOpaque optimizerOptions(true, true, false, false);
Narumol Prangnawarat74a3cf52021-01-29 15:38:54 +000032 armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +000033
34 DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +000035 tensorShape,
36 inputData,
37 inputData,
38 divData,
39 expectedResult,
40 delegateOptions);
41 }
42 // ReduceFp32ToFp16 option is enabled
43 CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
44 CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
45}
46
47TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug")
48{
49 std::stringstream ss;
50 {
51 StreamRedirector redirect(std::cout, ss.rdbuf());
52
53 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
54 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
55 std::vector<float> inputData = { 1, 2, 3, 4 };
56 std::vector<float> divData = { 2, 2, 3, 4 };
57 std::vector<float> expectedResult = { 1, 2, 2, 2 };
58
59 // Enable Debug
John Mcloughlinc5ee0d72023-03-24 12:07:25 +000060 armnn::OptimizerOptionsOpaque optimizerOptions(false, true, false, false);
Narumol Prangnawarat74a3cf52021-01-29 15:38:54 +000061 armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +000062
63 DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +000064 tensorShape,
65 inputData,
66 inputData,
67 divData,
68 expectedResult,
69 delegateOptions);
70 }
71 // Debug option triggered.
72 CHECK(ss.str().find("layerGuid") != std::string::npos);
73 CHECK(ss.str().find("layerName") != std::string::npos);
74 CHECK(ss.str().find("outputSlot") != std::string::npos);
75 CHECK(ss.str().find("shape") != std::string::npos);
76 CHECK(ss.str().find("data") != std::string::npos);
77}
78
79TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
80{
81 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
82 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
83 std::vector<float> inputData = { 1, 2, 3, 4 };
84 std::vector<float> divData = { 2, 2, 3, 4 };
85 std::vector<float> expectedResult = { 1, 2, 2, 2 };
86
87 // Enable debug with debug callback function
John Mcloughlinc5ee0d72023-03-24 12:07:25 +000088 armnn::OptimizerOptionsOpaque optimizerOptions(false, true, false, false);
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +000089 bool callback = false;
Cathal Corbett5aa9fd72022-02-25 15:33:28 +000090 auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor)
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +000091 {
92 armnn::IgnoreUnused(guid);
93 armnn::IgnoreUnused(slotIndex);
94 armnn::IgnoreUnused(tensor);
95 callback = true;
96 };
97
Francis Murtagh73d3e2e2021-04-29 14:23:04 +010098 armnn::INetworkProperties networkProperties(false, armnn::MemorySource::Undefined, armnn::MemorySource::Undefined);
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +000099 armnnDelegate::DelegateOptions delegateOptions(backends,
100 optimizerOptions,
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +0000101 armnn::EmptyOptional(),
102 armnn::Optional<armnn::DebugCallbackFunction>(mockCallback));
103
104 CHECK(!callback);
105
106 DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +0000107 tensorShape,
108 inputData,
109 inputData,
110 divData,
111 expectedResult,
112 delegateOptions);
113
114 // Check that the debug callback function was called.
115 CHECK(callback);
116}
117
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +0000118TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
119{
Matthew Sloyanebe392d2023-03-30 10:12:08 +0100120 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +0000121 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
122 std::vector<uint8_t> inputData = { 1, 2, 3, 4 };
123 std::vector<uint8_t> divData = { 2, 2, 3, 4 };
Narumol Prangnawarat74a3cf52021-01-29 15:38:54 +0000124 std::vector<uint8_t> expectedResult = { 1, 2, 2, 2 };
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +0000125
John Mcloughlinc5ee0d72023-03-24 12:07:25 +0000126 armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false, true);
Narumol Prangnawarat74a3cf52021-01-29 15:38:54 +0000127 armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +0000128
129 DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +0000130 tensorShape,
131 inputData,
132 inputData,
133 divData,
134 expectedResult,
135 delegateOptions);
136}
137
Sadik Armaganca565c12022-08-16 12:17:24 +0100138TEST_CASE ("ArmnnDelegateStringParsingOptionDisableTfLiteRuntimeFallback")
139{
140 std::stringstream stringStream;
141 std::vector<std::string> keys { "backends", "debug-data", "disable-tflite-runtime-fallback"};
142 std::vector<std::string> values { "CpuRef", "1", "1"};
143
144 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
145 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
146 std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
147 std::vector<float> expectedResult = { 1.0f, -2.0f, 3.0f, -4.0f };
148
149 // Create options_keys and options_values char array
150 size_t num_options = keys.size();
151 std::unique_ptr<const char*> options_keys =
152 std::unique_ptr<const char*>(new const char*[num_options + 1]);
153 std::unique_ptr<const char*> options_values =
154 std::unique_ptr<const char*>(new const char*[num_options + 1]);
155 for (size_t i=0; i<num_options; ++i)
156 {
157 options_keys.get()[i] = keys[i].c_str();
158 options_values.get()[i] = values[i].c_str();
159 }
160
161 StreamRedirector redirect(std::cout, stringStream.rdbuf());
162
163 armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
164 DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
Sadik Armaganca565c12022-08-16 12:17:24 +0100165 tensorShape,
166 inputData,
167 expectedResult,
168 delegateOptions);
169 CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
170 != std::string::npos);
171}
172
173TEST_CASE ("ArmnnDelegateStringParsingOptionEnableTfLiteRuntimeFallback")
174{
175 std::stringstream stringStream;
176 std::vector<std::string> keys { "backends", "debug-data", "disable-tflite-runtime-fallback"};
177 std::vector<std::string> values { "CpuRef", "1", "0"};
178
179 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
180 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
181 std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
Teresa Charlin93f0ad02023-03-23 15:28:02 +0000182 std::vector<float> expectedResult = { 0.995004177f, -0.504846036f, -0.989992499f, -0.112152621f };
Sadik Armaganca565c12022-08-16 12:17:24 +0100183
184 // Create options_keys and options_values char array
185 size_t num_options = keys.size();
186 std::unique_ptr<const char*> options_keys =
187 std::unique_ptr<const char*>(new const char*[num_options + 1]);
188 std::unique_ptr<const char*> options_values =
189 std::unique_ptr<const char*>(new const char*[num_options + 1]);
190 for (size_t i=0; i<num_options; ++i)
191 {
192 options_keys.get()[i] = keys[i].c_str();
193 options_values.get()[i] = values[i].c_str();
194 }
195
196 StreamRedirector redirect(std::cout, stringStream.rdbuf());
197
198 armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
199 DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
Sadik Armaganca565c12022-08-16 12:17:24 +0100200 tensorShape,
201 inputData,
202 expectedResult,
203 delegateOptions);
204
205 CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
206 == std::string::npos);
207}
208
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +0000209}
210
Matthew Sloyan0a7dc6b2021-02-10 16:50:53 +0000211TEST_SUITE("DelegateOptions_CpuAccTests")
212{
213
214TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test")
215{
216 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
217 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
218 std::vector<float> inputData = { 1, 2, 3, 4 };
219 std::vector<float> divData = { 2, 2, 3, 4 };
220 std::vector<float> expectedResult = { 1, 2, 2, 2 };
221
222 unsigned int numberOfThreads = 2;
223
224 armnn::ModelOptions modelOptions;
225 armnn::BackendOptions cpuAcc("CpuAcc",
226 {
227 { "FastMathEnabled", true },
228 { "NumberOfThreads", numberOfThreads }
229 });
230 modelOptions.push_back(cpuAcc);
231
John Mcloughlinc5ee0d72023-03-24 12:07:25 +0000232 armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false,
233 false, modelOptions, false);
Colm Donelan7bcae3c2024-01-22 10:07:14 +0000234 std::vector<armnn::BackendId> availableBackends = CaptureAvailableBackends(backends);
235 // It's possible that CpuAcc isn't supported. In that case availableBackends will be empty.
236 if (availableBackends.empty())
237 {
238 return;
239 }
240 armnnDelegate::DelegateOptions delegateOptions(availableBackends, optimizerOptions);
Matthew Sloyan0a7dc6b2021-02-10 16:50:53 +0000241
242 DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
Matthew Sloyan0a7dc6b2021-02-10 16:50:53 +0000243 tensorShape,
244 inputData,
245 inputData,
246 divData,
247 expectedResult,
248 delegateOptions);
249}
250
Colm Donelan3e32a872021-10-04 22:55:37 +0100251TEST_CASE ("ArmnnDelegateSerializeToDot")
252{
253 const fs::path filename(fs::temp_directory_path() / "ArmnnDelegateSerializeToDot.dot");
254 if ( fs::exists(filename) )
255 {
256 fs::remove(filename);
257 }
258 std::stringstream ss;
259 {
260 StreamRedirector redirect(std::cout, ss.rdbuf());
261
262 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
263 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
264 std::vector<float> inputData = { 1, 2, 3, 4 };
265 std::vector<float> divData = { 2, 2, 3, 4 };
266 std::vector<float> expectedResult = { 1, 2, 2, 2 };
267
John Mcloughlinc5ee0d72023-03-24 12:07:25 +0000268 armnn::OptimizerOptionsOpaque optimizerOptions(false, false,
269 false, false);
Colm Donelan3e32a872021-10-04 22:55:37 +0100270 armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
271 // Enable serialize to dot by specifying the target file name.
272 delegateOptions.SetSerializeToDot(filename);
273 DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
Colm Donelan3e32a872021-10-04 22:55:37 +0100274 tensorShape,
275 inputData,
276 inputData,
277 divData,
278 expectedResult,
279 delegateOptions);
280 }
281 CHECK(fs::exists(filename));
282 // The file should have a size greater than 0 bytes.
283 CHECK(fs::file_size(filename) > 0);
284 // Clean up.
285 fs::remove(filename);
286}
287
Jan Eilersf39f8d82021-10-26 16:57:34 +0100288void CreateFp16StringParsingTestRun(std::vector<std::string>& keys,
289 std::vector<std::string>& values,
290 std::stringstream& ss)
291{
292 StreamRedirector redirect(std::cout, ss.rdbuf());
293
294 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
295 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
296 std::vector<float> inputData = { 1, 2, 3, 4 };
297 std::vector<float> divData = { 2, 2, 3, 4 };
298 std::vector<float> expectedResult = { 1, 2, 2, 2 };
299
300 // Create options_keys and options_values char array
301 size_t num_options = keys.size();
302 std::unique_ptr<const char*> options_keys =
303 std::unique_ptr<const char*>(new const char*[num_options + 1]);
304 std::unique_ptr<const char*> options_values =
305 std::unique_ptr<const char*>(new const char*[num_options + 1]);
306 for (size_t i=0; i<num_options; ++i)
307 {
308 options_keys.get()[i] = keys[i].c_str();
309 options_values.get()[i] = values[i].c_str();
310 }
311
John Mcloughlinc5ee0d72023-03-24 12:07:25 +0000312 armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(),
313 num_options, nullptr);
Jan Eilersf39f8d82021-10-26 16:57:34 +0100314 DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
Jan Eilersf39f8d82021-10-26 16:57:34 +0100315 tensorShape,
316 inputData,
317 inputData,
318 divData,
319 expectedResult,
320 delegateOptions);
321}
322
323TEST_CASE ("ArmnnDelegateStringParsingOptionReduceFp32ToFp16")
324{
325 SUBCASE("Fp16=1")
326 {
327 std::stringstream ss;
328 std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16", "logging-severity"};
329 std::vector<std::string> values { "CpuRef", "1", "1", "info"};
330 CreateFp16StringParsingTestRun(keys, values, ss);
331 CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
332 CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
333 }
334 SUBCASE("Fp16=true")
335 {
336 std::stringstream ss;
337 std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
338 std::vector<std::string> values { "CpuRef", "TRUE", "true"};
339 CreateFp16StringParsingTestRun(keys, values, ss);
340 CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
341 CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
342 }
343 SUBCASE("Fp16=True")
344 {
345 std::stringstream ss;
346 std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
347 std::vector<std::string> values { "CpuRef", "true", "True"};
348 CreateFp16StringParsingTestRun(keys, values, ss);
349 CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
350 CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
351 }
352 SUBCASE("Fp16=0")
353 {
354 std::stringstream ss;
355 std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
356 std::vector<std::string> values { "CpuRef", "true", "0"};
357 CreateFp16StringParsingTestRun(keys, values, ss);
358 CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
359 CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
360 }
361 SUBCASE("Fp16=false")
362 {
363 std::stringstream ss;
364 std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
365 std::vector<std::string> values { "CpuRef", "1", "false"};
366 CreateFp16StringParsingTestRun(keys, values, ss);
367 CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
368 CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
369 }
370}
371
Matthew Sloyan0a7dc6b2021-02-10 16:50:53 +0000372}
373
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +0000374} // namespace armnnDelegate