blob: 295c870370f43b1e91aac74988223af9018adbf6 [file] [log] [blame]
Richard Burtondc0c6ed2020-04-08 16:39:05 +01001# Copyright © 2020 Arm Ltd. All rights reserved.
2# SPDX-License-Identifier: MIT
3import os
4
5import pytest
Jan Eilers1b2654f2021-09-24 15:45:46 +01006import warnings
Richard Burtondc0c6ed2020-04-08 16:39:05 +01007import numpy as np
8
9import pyarmnn as ann
10
11
@pytest.fixture(scope="function")
def random_runtime(shared_data_folder):
    """Parse the mock TFLite model and yield backends, network, runtime,
    random-uint8 input tensors and empty output tensors for it."""
    tflite_parser = ann.ITfLiteParser()
    network = tflite_parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.tflite'))
    preferred_backends = [ann.BackendId('CpuRef')]
    options = ann.CreationOptions()

    runtime = ann.IRuntime(options)

    # Work on the model's last subgraph.
    graph_id = tflite_parser.GetSubgraphCount() - 1
    input_names = tflite_parser.GetSubgraphInputTensorNames(graph_id)

    input_binding_info = tflite_parser.GetNetworkInputBindingInfo(graph_id, input_names[0])
    input_tensor_id = input_binding_info[0]
    input_tensor_info = input_binding_info[1]

    output_names = tflite_parser.GetSubgraphOutputTensorNames(graph_id)

    # Random uint8 payload sized to the input tensor's element count.
    input_data = np.random.randint(255, size=input_tensor_info.GetNumElements(), dtype=np.uint8)
    input_tensors = [(input_tensor_id, ann.ConstTensor(input_tensor_info, input_data))]

    # One (binding id, empty Tensor) pair per model output.
    output_bindings = [tflite_parser.GetNetworkOutputBindingInfo(graph_id, output_name)
                       for output_name in output_names]
    output_tensors = [(binding[0], ann.Tensor(binding[1])) for binding in output_bindings]

    yield preferred_backends, network, runtime, input_tensors, output_tensors
51
52
@pytest.fixture(scope='function')
def mock_model_runtime(shared_data_folder):
    """Parse, optimize and load the mock TFLite model; yield the runtime,
    the loaded network id and ready-made input/output tensors."""
    tflite_parser = ann.ITfLiteParser()
    network = tflite_parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, 'mock_model.tflite'))
    graph_id = 0

    input_binding_info = tflite_parser.GetNetworkInputBindingInfo(graph_id, "input_1")
    input_tensor_data = np.load(os.path.join(shared_data_folder, 'tflite_parser/input_lite.npy'))

    preferred_backends = [ann.BackendId('CpuRef')]
    runtime = ann.IRuntime(ann.CreationOptions())

    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
    print(messages)

    net_id, messages = runtime.LoadNetwork(opt_network)
    print(messages)

    input_tensors = ann.make_input_tensors([input_binding_info], [input_tensor_data])

    # Collect binding info for every model output, then build output tensors.
    output_names = tflite_parser.GetSubgraphOutputTensorNames(graph_id)
    outputs_binding_info = [tflite_parser.GetNetworkOutputBindingInfo(graph_id, output_name)
                            for output_name in output_names]
    output_tensors = ann.make_output_tensors(outputs_binding_info)

    yield runtime, net_id, input_tensors, output_tensors
87
88
def test_python_disowns_network(random_runtime):
    """After LoadNetwork, SWIG ownership of the optimized network should
    have moved away from Python."""
    preferred_backends, network, runtime = random_runtime[:3]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    runtime.LoadNetwork(opt_network)

    # thisown == 0 means Python no longer owns the underlying object.
    assert not opt_network.thisown
99
Richard Burtondc0c6ed2020-04-08 16:39:05 +0100100def test_load_network(random_runtime):
101 preferred_backends = random_runtime[0]
102 network = random_runtime[1]
103 runtime = random_runtime[2]
104
105 opt_network, _ = ann.Optimize(network, preferred_backends,
106 runtime.GetDeviceSpec(), ann.OptimizerOptions())
107
108 net_id, messages = runtime.LoadNetwork(opt_network)
109 assert "" == messages
110 assert net_id == 0
111
Éanna Ó Catháin59da3692020-04-16 08:54:12 +0100112def test_create_runtime_with_external_profiling_enabled():
113
114 options = ann.CreationOptions()
115
116 options.m_ProfilingOptions.m_FileOnly = True
117 options.m_ProfilingOptions.m_EnableProfiling = True
118 options.m_ProfilingOptions.m_OutgoingCaptureFile = "/tmp/outgoing.txt"
119 options.m_ProfilingOptions.m_IncomingCaptureFile = "/tmp/incoming.txt"
120 options.m_ProfilingOptions.m_TimelineEnabled = True
121 options.m_ProfilingOptions.m_CapturePeriod = 1000
122 options.m_ProfilingOptions.m_FileFormat = "JSON"
123
124 runtime = ann.IRuntime(options)
125
126 assert runtime is not None
127
def test_create_runtime_with_external_profiling_enabled_invalid_options():
    """Enabling timeline reporting without enabling profiling must raise."""
    options = ann.CreationOptions()

    profiling_settings = {
        'm_FileOnly': True,
        'm_EnableProfiling': False,  # invalid: timeline needs profiling on
        'm_OutgoingCaptureFile': "/tmp/outgoing.txt",
        'm_IncomingCaptureFile': "/tmp/incoming.txt",
        'm_TimelineEnabled': True,
        'm_CapturePeriod': 1000,
        'm_FileFormat': "JSON",
    }
    for attribute, value in profiling_settings.items():
        setattr(options.m_ProfilingOptions, attribute, value)

    with pytest.raises(RuntimeError) as err:
        ann.IRuntime(options)

    expected_error_message = "It is not possible to enable timeline reporting without profiling being enabled"
    assert expected_error_message in str(err.value)
145
Richard Burtondc0c6ed2020-04-08 16:39:05 +0100146
def test_load_network_properties_provided(random_runtime):
    """LoadNetwork also succeeds when INetworkProperties are supplied."""
    preferred_backends, network, runtime = random_runtime[:3]

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    properties = ann.INetworkProperties(True, True)
    net_id, messages = runtime.LoadNetwork(opt_network, properties)
    assert messages == ""
    assert net_id == 0
159
Jan Eilers1b2654f2021-09-24 15:45:46 +0100160def test_network_properties_constructor(random_runtime):
161 preferred_backends = random_runtime[0]
162 network = random_runtime[1]
163 runtime = random_runtime[2]
164
165 opt_network, _ = ann.Optimize(network, preferred_backends,
166 runtime.GetDeviceSpec(), ann.OptimizerOptions())
167
168 inputSource = ann.MemorySource_Undefined
169 outputSource = ann.MemorySource_Undefined
170 properties = ann.INetworkProperties(True, inputSource, outputSource)
171 assert properties.m_AsyncEnabled == True
172 assert properties.m_ProfilingEnabled == False
173 assert properties.m_OutputNetworkDetailsMethod == ann.ProfilingDetailsMethod_Undefined
174 assert properties.m_InputSource == ann.MemorySource_Undefined
175 assert properties.m_OutputSource == ann.MemorySource_Undefined
176
177 net_id, messages = runtime.LoadNetwork(opt_network, properties)
178 assert "" == messages
179 assert net_id == 0
180
def test_network_properties_deprecated_constructor():
    # NOTE(review): placeholder — this emits and catches its own
    # DeprecationWarning; it does not exercise the deprecated
    # INetworkProperties constructor itself. Consider calling the
    # deprecated API under pytest.warns instead.
    deprecation_message = "Deprecated: Use constructor with MemorySource argument instead."
    with pytest.warns(DeprecationWarning):
        warnings.warn(deprecation_message, DeprecationWarning)
Richard Burtondc0c6ed2020-04-08 16:39:05 +0100184
def test_unload_network_fails_for_invalid_net_id(random_runtime):
    """Unloading a network id that was never loaded raises RuntimeError."""
    preferred_backends, network, runtime = random_runtime[:3]

    ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    # Nothing was loaded into the runtime, so id 9 cannot exist.
    with pytest.raises(RuntimeError) as err:
        runtime.UnloadNetwork(9)

    expected_error_message = "Failed to unload network."
    assert expected_error_message in str(err.value)
197
198
def test_enqueue_workload(random_runtime):
    """A loaded network accepts a well-formed inference request."""
    preferred_backends, network, runtime, input_tensors, output_tensors = random_runtime

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    net_id, _ = runtime.LoadNetwork(opt_network)
    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
211
212
def test_enqueue_workload_fails_with_empty_input_tensors(random_runtime):
    """EnqueueWorkload rejects a request whose input tensor list is empty."""
    preferred_backends, network, runtime, _, output_tensors = random_runtime
    input_tensors = []

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())

    net_id, _ = runtime.LoadNetwork(opt_network)
    with pytest.raises(RuntimeError) as err:
        runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    expected_error_message = "Number of inputs provided does not match network."
    assert expected_error_message in str(err.value)
229
230
@pytest.mark.x86_64
@pytest.mark.parametrize('count', [5])
def test_multiple_inference_runs_yield_same_result(count, mock_model_runtime):
    """
    Test that results remain consistent among multiple runs of the same inference.
    """
    runtime, net_id, input_tensors, output_tensors = mock_model_runtime

    expected_results = np.array([[4, 85, 108, 29, 8, 16, 0, 2, 5, 0]])

    for _ in range(count):
        runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

        output_vectors = ann.workload_tensors_to_ndarray(output_tensors)

        for i in range(len(expected_results)):
            # Bug fix: the old `output.all() == expected.all()` compared two
            # scalar truth values, so the assertion could pass even when the
            # arrays differed. np.array_equal compares element-wise.
            assert np.array_equal(output_vectors[i], expected_results[i])
251
252
@pytest.mark.aarch64
def test_aarch64_inference_results(mock_model_runtime):
    """Inference on aarch64 reproduces the reference output for the mock model."""
    runtime, net_id, input_tensors, output_tensors = mock_model_runtime

    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    output_vectors = ann.workload_tensors_to_ndarray(output_tensors)

    # Single reference array (the old code accidentally chained
    # `expected_outputs = expected_results = ...`).
    expected_results = np.array([[4, 85, 108, 29, 8, 16, 0, 2, 5, 0]])

    for i in range(len(expected_results)):
        # Bug fix: `a.all() == b.all()` compared two scalar truth values and
        # could pass for mismatching outputs; np.array_equal compares
        # element-wise.
        assert np.array_equal(output_vectors[i], expected_results[i])
269
270
def test_enqueue_workload_with_profiler(random_runtime):
    """
    Tests ArmNN's profiling extension
    """
    preferred_backends, network, runtime, input_tensors, output_tensors = random_runtime

    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())
    net_id, _ = runtime.LoadNetwork(opt_network)

    profiler = runtime.GetProfiler(net_id)

    # Profiling starts disabled; switch it on before running the workload.
    assert profiler.IsProfilingEnabled() is False
    profiler.EnableProfiling(True)
    assert profiler.IsProfilingEnabled() is True

    # Run the inference:
    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    # The JSON dump must be non-empty and contain the ArmNN marker.
    str_profile = profiler.as_json()
    assert len(str_profile) != 0
    assert str_profile.find('\"ArmNN\": {') > 0

    # The events analysis output must contain its table header.
    str_events_analysis = profiler.event_log()
    assert "Event Sequence - Name | Duration (ms) | Start (ms) | Stop (ms) | Device" in str_events_analysis

    # The runtime, not Python, owns the profiler object.
    assert profiler.thisown == 0
309
310
def test_check_runtime_swig_ownership(random_runtime):
    """SWIG must hold ownership of the runtime object so that it can be
    garbage-collected automatically once no longer referenced from Python."""
    _, _, runtime, _, _ = random_runtime
    assert runtime.thisown