//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "DelegateOptionsTestHelper.hpp"
#include <common/include/ProfilingGuid.hpp>
#include <armnnUtils/Filesystem.hpp>

10namespace armnnDelegate
11{
12
13TEST_SUITE("DelegateOptions")
14{
15
16TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16")
17{
18 std::stringstream ss;
19 {
20 StreamRedirector redirect(std::cout, ss.rdbuf());
21
22 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
23 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
24 std::vector<float> inputData = { 1, 2, 3, 4 };
25 std::vector<float> divData = { 2, 2, 3, 4 };
26 std::vector<float> expectedResult = { 1, 2, 2, 2 };
27
28 // Enable ReduceFp32ToFp16
29 armnn::OptimizerOptions optimizerOptions(true, true, false, false);
Narumol Prangnawarat74a3cf52021-01-29 15:38:54 +000030 armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +000031
32 DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
33 backends,
34 tensorShape,
35 inputData,
36 inputData,
37 divData,
38 expectedResult,
39 delegateOptions);
40 }
41 // ReduceFp32ToFp16 option is enabled
42 CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
43 CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
44}
45
46TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug")
47{
48 std::stringstream ss;
49 {
50 StreamRedirector redirect(std::cout, ss.rdbuf());
51
52 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
53 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
54 std::vector<float> inputData = { 1, 2, 3, 4 };
55 std::vector<float> divData = { 2, 2, 3, 4 };
56 std::vector<float> expectedResult = { 1, 2, 2, 2 };
57
58 // Enable Debug
59 armnn::OptimizerOptions optimizerOptions(false, true, false, false);
Narumol Prangnawarat74a3cf52021-01-29 15:38:54 +000060 armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +000061
62 DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
63 backends,
64 tensorShape,
65 inputData,
66 inputData,
67 divData,
68 expectedResult,
69 delegateOptions);
70 }
71 // Debug option triggered.
72 CHECK(ss.str().find("layerGuid") != std::string::npos);
73 CHECK(ss.str().find("layerName") != std::string::npos);
74 CHECK(ss.str().find("outputSlot") != std::string::npos);
75 CHECK(ss.str().find("shape") != std::string::npos);
76 CHECK(ss.str().find("data") != std::string::npos);
77}
78
79TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
80{
81 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
82 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
83 std::vector<float> inputData = { 1, 2, 3, 4 };
84 std::vector<float> divData = { 2, 2, 3, 4 };
85 std::vector<float> expectedResult = { 1, 2, 2, 2 };
86
87 // Enable debug with debug callback function
88 armnn::OptimizerOptions optimizerOptions(false, true, false, false);
89 bool callback = false;
Cathal Corbett5aa9fd72022-02-25 15:33:28 +000090 auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor)
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +000091 {
92 armnn::IgnoreUnused(guid);
93 armnn::IgnoreUnused(slotIndex);
94 armnn::IgnoreUnused(tensor);
95 callback = true;
96 };
97
Francis Murtagh73d3e2e2021-04-29 14:23:04 +010098 armnn::INetworkProperties networkProperties(false, armnn::MemorySource::Undefined, armnn::MemorySource::Undefined);
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +000099 armnnDelegate::DelegateOptions delegateOptions(backends,
100 optimizerOptions,
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +0000101 armnn::EmptyOptional(),
102 armnn::Optional<armnn::DebugCallbackFunction>(mockCallback));
103
104 CHECK(!callback);
105
106 DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
107 backends,
108 tensorShape,
109 inputData,
110 inputData,
111 divData,
112 expectedResult,
113 delegateOptions);
114
115 // Check that the debug callback function was called.
116 CHECK(callback);
117}
118
119TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToBf16")
120{
121 std::stringstream ss;
122 {
123 StreamRedirector redirect(std::cout, ss.rdbuf());
124
125 ReduceFp32ToBf16TestImpl();
126 }
127
128 // ReduceFp32ToBf16 option is enabled
129 CHECK(ss.str().find("convert_fp32_to_bf16") != std::string::npos);
130}
131
132TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
133{
134 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
135 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
136 std::vector<uint8_t> inputData = { 1, 2, 3, 4 };
137 std::vector<uint8_t> divData = { 2, 2, 3, 4 };
Narumol Prangnawarat74a3cf52021-01-29 15:38:54 +0000138 std::vector<uint8_t> expectedResult = { 1, 2, 2, 2 };
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +0000139
140 armnn::OptimizerOptions optimizerOptions(false, false, false, true);
Narumol Prangnawarat74a3cf52021-01-29 15:38:54 +0000141 armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +0000142
143 DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
144 backends,
145 tensorShape,
146 inputData,
147 inputData,
148 divData,
149 expectedResult,
150 delegateOptions);
151}
152
153}
154
Matthew Sloyan0a7dc6b2021-02-10 16:50:53 +0000155TEST_SUITE("DelegateOptions_CpuAccTests")
156{
157
158TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test")
159{
160 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
161 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
162 std::vector<float> inputData = { 1, 2, 3, 4 };
163 std::vector<float> divData = { 2, 2, 3, 4 };
164 std::vector<float> expectedResult = { 1, 2, 2, 2 };
165
166 unsigned int numberOfThreads = 2;
167
168 armnn::ModelOptions modelOptions;
169 armnn::BackendOptions cpuAcc("CpuAcc",
170 {
171 { "FastMathEnabled", true },
172 { "NumberOfThreads", numberOfThreads }
173 });
174 modelOptions.push_back(cpuAcc);
175
Colm Donelan03bf98a2022-05-30 15:20:36 +0100176 armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
Matthew Sloyan0a7dc6b2021-02-10 16:50:53 +0000177 armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
178
179 DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
180 backends,
181 tensorShape,
182 inputData,
183 inputData,
184 divData,
185 expectedResult,
186 delegateOptions);
187}
188
Colm Donelan3e32a872021-10-04 22:55:37 +0100189TEST_CASE ("ArmnnDelegateSerializeToDot")
190{
191 const fs::path filename(fs::temp_directory_path() / "ArmnnDelegateSerializeToDot.dot");
192 if ( fs::exists(filename) )
193 {
194 fs::remove(filename);
195 }
196 std::stringstream ss;
197 {
198 StreamRedirector redirect(std::cout, ss.rdbuf());
199
200 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
201 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
202 std::vector<float> inputData = { 1, 2, 3, 4 };
203 std::vector<float> divData = { 2, 2, 3, 4 };
204 std::vector<float> expectedResult = { 1, 2, 2, 2 };
205
206 armnn::OptimizerOptions optimizerOptions(false, false, false, false);
207 armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
208 // Enable serialize to dot by specifying the target file name.
209 delegateOptions.SetSerializeToDot(filename);
210 DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
211 backends,
212 tensorShape,
213 inputData,
214 inputData,
215 divData,
216 expectedResult,
217 delegateOptions);
218 }
219 CHECK(fs::exists(filename));
220 // The file should have a size greater than 0 bytes.
221 CHECK(fs::file_size(filename) > 0);
222 // Clean up.
223 fs::remove(filename);
224}
225
Jan Eilersf39f8d82021-10-26 16:57:34 +0100226void CreateFp16StringParsingTestRun(std::vector<std::string>& keys,
227 std::vector<std::string>& values,
228 std::stringstream& ss)
229{
230 StreamRedirector redirect(std::cout, ss.rdbuf());
231
232 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
233 std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
234 std::vector<float> inputData = { 1, 2, 3, 4 };
235 std::vector<float> divData = { 2, 2, 3, 4 };
236 std::vector<float> expectedResult = { 1, 2, 2, 2 };
237
238 // Create options_keys and options_values char array
239 size_t num_options = keys.size();
240 std::unique_ptr<const char*> options_keys =
241 std::unique_ptr<const char*>(new const char*[num_options + 1]);
242 std::unique_ptr<const char*> options_values =
243 std::unique_ptr<const char*>(new const char*[num_options + 1]);
244 for (size_t i=0; i<num_options; ++i)
245 {
246 options_keys.get()[i] = keys[i].c_str();
247 options_values.get()[i] = values[i].c_str();
248 }
249
250 armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
251 DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
252 backends,
253 tensorShape,
254 inputData,
255 inputData,
256 divData,
257 expectedResult,
258 delegateOptions);
259}
260
261TEST_CASE ("ArmnnDelegateStringParsingOptionReduceFp32ToFp16")
262{
263 SUBCASE("Fp16=1")
264 {
265 std::stringstream ss;
266 std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16", "logging-severity"};
267 std::vector<std::string> values { "CpuRef", "1", "1", "info"};
268 CreateFp16StringParsingTestRun(keys, values, ss);
269 CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
270 CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
271 }
272 SUBCASE("Fp16=true")
273 {
274 std::stringstream ss;
275 std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
276 std::vector<std::string> values { "CpuRef", "TRUE", "true"};
277 CreateFp16StringParsingTestRun(keys, values, ss);
278 CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
279 CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
280 }
281 SUBCASE("Fp16=True")
282 {
283 std::stringstream ss;
284 std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
285 std::vector<std::string> values { "CpuRef", "true", "True"};
286 CreateFp16StringParsingTestRun(keys, values, ss);
287 CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
288 CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
289 }
290 SUBCASE("Fp16=0")
291 {
292 std::stringstream ss;
293 std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
294 std::vector<std::string> values { "CpuRef", "true", "0"};
295 CreateFp16StringParsingTestRun(keys, values, ss);
296 CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
297 CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
298 }
299 SUBCASE("Fp16=false")
300 {
301 std::stringstream ss;
302 std::vector<std::string> keys { "backends", "debug-data", "reduce-fp32-to-fp16"};
303 std::vector<std::string> values { "CpuRef", "1", "false"};
304 CreateFp16StringParsingTestRun(keys, values, ss);
305 CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
306 CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
307 }
308}
309
Colm Donelan3e32a872021-10-04 22:55:37 +0100310
Matthew Sloyan0a7dc6b2021-02-10 16:50:53 +0000311}
312
Narumol Prangnawarat0b51d5a2021-01-20 15:58:29 +0000313} // namespace armnnDelegate