blob: 3bdd48bcfa32d05f72704b104f530f7c06f6d141 [file] [log] [blame]
Aron Virginas-Tar70104002018-10-24 15:33:28 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5#pragma once
6
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01007#include <ResolveType.hpp>
Nattapat Chaimanowong1fcb4ff2019-01-24 15:25:26 +00008
Aron Virginas-Tar70104002018-10-24 15:33:28 +01009#include <armnn/ArmNN.hpp>
narpra01b9546cf2018-11-20 15:21:28 +000010#include <armnn/INetwork.hpp>
Ferran Balaguerdcaa6102019-08-21 13:28:38 +010011#include <Profiling.hpp>
Aron Virginas-Tar70104002018-10-24 15:33:28 +010012
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000013#include <backendsCommon/test/QuantizeHelper.hpp>
Aron Virginas-Tar70104002018-10-24 15:33:28 +010014
narpra01b9546cf2018-11-20 15:21:28 +000015#include <boost/test/unit_test.hpp>
16
Aron Virginas-Tar70104002018-10-24 15:33:28 +010017#include <vector>
18
19namespace
20{
21
22using namespace armnn;
23
24template<typename T>
25bool ConstantUsageTest(const std::vector<BackendId>& computeDevice,
26 const TensorInfo& commonTensorInfo,
27 const std::vector<T>& inputData,
28 const std::vector<T>& constantData,
29 const std::vector<T>& expectedOutputData)
30{
31 // Create runtime in which test will run
32 IRuntime::CreationOptions options;
33 IRuntimePtr runtime(IRuntime::Create(options));
34
35 // Builds up the structure of the network.
36 INetworkPtr net(INetwork::Create());
37
38 IConnectableLayer* input = net->AddInputLayer(0);
39 IConnectableLayer* constant = net->AddConstantLayer(ConstTensor(commonTensorInfo, constantData));
40 IConnectableLayer* add = net->AddAdditionLayer();
41 IConnectableLayer* output = net->AddOutputLayer(0);
42
43 input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
44 constant->GetOutputSlot(0).Connect(add->GetInputSlot(1));
45 add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
46
47 // Sets the tensors in the network.
48 input->GetOutputSlot(0).SetTensorInfo(commonTensorInfo);
49 constant->GetOutputSlot(0).SetTensorInfo(commonTensorInfo);
50 add->GetOutputSlot(0).SetTensorInfo(commonTensorInfo);
51
52 // optimize the network
53 IOptimizedNetworkPtr optNet = Optimize(*net, computeDevice, runtime->GetDeviceSpec());
54
55 // Loads it into the runtime.
56 NetworkId netId;
57 runtime->LoadNetwork(netId, std::move(optNet));
58
59 // Creates structures for input & output.
60 std::vector<T> outputData(inputData.size());
61
62 InputTensors inputTensors
63 {
64 {0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
65 };
66 OutputTensors outputTensors
67 {
68 {0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
69 };
70
71 // Does the inference.
72 runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
73
74 // Checks the results.
75 return outputData == expectedOutputData;
76}
77
78inline bool ConstantUsageFloat32Test(const std::vector<BackendId>& backends)
79{
80 const TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32);
81
82 return ConstantUsageTest(backends,
83 commonTensorInfo,
84 std::vector<float>{ 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }, // Input.
85 std::vector<float>{ 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }, // Const input.
86 std::vector<float>{ 7.f, 7.f, 7.f, 7.f, 7.f, 7.f } // Expected output.
87 );
88}
89
90inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
91{
92 TensorInfo commonTensorInfo({ 2, 3 }, DataType::QuantisedAsymm8);
93
94 const float scale = 0.023529f;
95 const int8_t offset = -43;
96
97 commonTensorInfo.SetQuantizationScale(scale);
98 commonTensorInfo.SetQuantizationOffset(offset);
99
100 return ConstantUsageTest(backends,
101 commonTensorInfo,
102 QuantizedVector<uint8_t>(scale, offset, { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }), // Input.
103 QuantizedVector<uint8_t>(scale, offset, { 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }), // Const input.
104 QuantizedVector<uint8_t>(scale, offset, { 7.f, 7.f, 7.f, 7.f, 7.f, 7.f }) // Expected output.
105 );
106}
107
// Compares two values as booleans: true when both are zero or both are
// non-zero. Used for DataType::Boolean outputs, where any non-zero raw
// value counts as 'true'.
template<typename T>
bool CompareBoolean(T a, T b)
{
    // Equivalent to the truth table (a==0 && b==0) || (a!=0 && b!=0).
    return (a == 0) == (b == 0);
}
113
114template<DataType ArmnnIType, DataType ArmnnOType,
115 typename TInput = ResolveType<ArmnnIType>, typename TOutput = ResolveType<ArmnnOType>>
narpra01b9546cf2018-11-20 15:21:28 +0000116void EndToEndLayerTestImpl(INetworkPtr network,
kevmay012b4d88e2019-01-24 14:05:09 +0000117 const std::map<int, std::vector<TInput>>& inputTensorData,
118 const std::map<int, std::vector<TOutput>>& expectedOutputData,
narpra01b9546cf2018-11-20 15:21:28 +0000119 std::vector<BackendId> backends)
120{
121 // Create runtime in which test will run
122 IRuntime::CreationOptions options;
123 IRuntimePtr runtime(IRuntime::Create(options));
124
125 // optimize the network
126 IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());
127
128 // Loads it into the runtime.
129 NetworkId netId;
130 runtime->LoadNetwork(netId, std::move(optNet));
131
132 InputTensors inputTensors;
133 inputTensors.reserve(inputTensorData.size());
134 for (auto&& it : inputTensorData)
135 {
136 inputTensors.push_back({it.first,
137 ConstTensor(runtime->GetInputTensorInfo(netId, it.first), it.second.data())});
138 }
139 OutputTensors outputTensors;
140 outputTensors.reserve(expectedOutputData.size());
kevmay012b4d88e2019-01-24 14:05:09 +0000141 std::map<int, std::vector<TOutput>> outputStorage;
narpra01b9546cf2018-11-20 15:21:28 +0000142 for (auto&& it : expectedOutputData)
143 {
kevmay012b4d88e2019-01-24 14:05:09 +0000144 std::vector<TOutput> out(it.second.size());
narpra01b9546cf2018-11-20 15:21:28 +0000145 outputStorage.emplace(it.first, out);
146 outputTensors.push_back({it.first,
147 Tensor(runtime->GetOutputTensorInfo(netId, it.first),
148 outputStorage.at(it.first).data())});
149 }
150
151 // Does the inference.
152 runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
153
154 // Checks the results.
155 for (auto&& it : expectedOutputData)
156 {
kevmay012b4d88e2019-01-24 14:05:09 +0000157 std::vector<TOutput> out = outputStorage.at(it.first);
Nattapat Chaimanowong1fcb4ff2019-01-24 15:25:26 +0000158 if (ArmnnOType == DataType::Boolean)
159 {
160 for (unsigned int i = 0; i < out.size(); ++i)
161 {
162 BOOST_TEST(CompareBoolean<TOutput>(it.second[i], out[i]));
163 }
164 }
165 else
166 {
Narumol Prangnawarat6d302bf2019-02-04 11:46:26 +0000167 for (unsigned int i = 0; i < out.size(); ++i)
168 {
169 BOOST_TEST(it.second[i] == out[i], boost::test_tools::tolerance(0.000001f));
170 }
Nattapat Chaimanowong1fcb4ff2019-01-24 15:25:26 +0000171 }
narpra01b9546cf2018-11-20 15:21:28 +0000172 }
173}
174
David Monahan4f1e8e42019-09-04 09:22:10 +0100175inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100176{
177 using namespace armnn;
178
179 // Create runtime in which test will run
180 IRuntime::CreationOptions options;
181 IRuntimePtr runtime(armnn::IRuntime::Create(options));
182
183 // build up the structure of the network
184 INetworkPtr net(INetwork::Create());
185
186 IConnectableLayer* input = net->AddInputLayer(0);
187
David Monahan3fb7e102019-08-20 11:25:29 +0100188 ActivationDescriptor descriptor;
189 descriptor.m_Function = ActivationFunction::Square;
190 IConnectableLayer* pooling = net->AddActivationLayer(descriptor);
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100191
192 IConnectableLayer* output = net->AddOutputLayer(0);
193
David Monahan3fb7e102019-08-20 11:25:29 +0100194 input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
195 pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100196
David Monahan3fb7e102019-08-20 11:25:29 +0100197 input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
198 pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100199
200 // Optimize the network
201 IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
David Monahan3fb7e102019-08-20 11:25:29 +0100202 BOOST_CHECK(optNet);
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100203
204 // Loads it into the runtime.
205 NetworkId netId;
David Monahan4f1e8e42019-09-04 09:22:10 +0100206 std::string ignoredErrorMessage;
207 // Enable Importing
David Monahan3fb7e102019-08-20 11:25:29 +0100208 INetworkProperties networkProperties(true, false);
David Monahan4f1e8e42019-09-04 09:22:10 +0100209 runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100210
211 // Creates structures for input & output
212 std::vector<float> inputData
213 {
David Monahan3fb7e102019-08-20 11:25:29 +0100214 1.0f, 2.0f, 3.0f, 4.0f
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100215 };
216
217 // Misaligned input
Aron Virginas-Tard9f7c8b2019-09-13 13:37:03 +0100218 float* misalignedInputData = reinterpret_cast<float*>(reinterpret_cast<char*>(inputData.data()) + 1);
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100219
David Monahan3fb7e102019-08-20 11:25:29 +0100220 std::vector<float> outputData(4);
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100221
David Monahan4f1e8e42019-09-04 09:22:10 +0100222 // Aligned output
David Monahan3fb7e102019-08-20 11:25:29 +0100223 float* alignedOutputData = outputData.data();
David Monahan4f1e8e42019-09-04 09:22:10 +0100224
225 InputTensors inputTensors
226 {
227 {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), misalignedInputData)},
228 };
229 OutputTensors outputTensors
230 {
231 {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), alignedOutputData)}
232 };
233
David Monahan4f1e8e42019-09-04 09:22:10 +0100234 runtime->GetProfiler(netId)->EnableProfiling(true);
235
236 // Do the inference and expect it to fail with a ImportMemoryException
237 BOOST_CHECK_THROW(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportException);
238}
239
240inline void ImportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
241{
242 using namespace armnn;
243
244 // Create runtime in which test will run
245 IRuntime::CreationOptions options;
246 IRuntimePtr runtime(armnn::IRuntime::Create(options));
247
248 // build up the structure of the network
249 INetworkPtr net(INetwork::Create());
250
251 IConnectableLayer* input = net->AddInputLayer(0);
252
David Monahan3fb7e102019-08-20 11:25:29 +0100253 ActivationDescriptor descriptor;
254 descriptor.m_Function = ActivationFunction::Square;
255 IConnectableLayer* pooling = net->AddActivationLayer(descriptor);
David Monahan4f1e8e42019-09-04 09:22:10 +0100256
257 IConnectableLayer* output = net->AddOutputLayer(0);
258
David Monahan3fb7e102019-08-20 11:25:29 +0100259 input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
260 pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
David Monahan4f1e8e42019-09-04 09:22:10 +0100261
David Monahan3fb7e102019-08-20 11:25:29 +0100262 input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
263 pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
David Monahan4f1e8e42019-09-04 09:22:10 +0100264
265 // Optimize the network
266 IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
David Monahan3fb7e102019-08-20 11:25:29 +0100267 BOOST_CHECK(optNet);
David Monahan4f1e8e42019-09-04 09:22:10 +0100268
269 // Loads it into the runtime.
270 NetworkId netId;
271 std::string ignoredErrorMessage;
David Monahan3fb7e102019-08-20 11:25:29 +0100272 // Enable Importing and Exporting
David Monahan4f1e8e42019-09-04 09:22:10 +0100273 INetworkProperties networkProperties(true, true);
274 runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
275
276 // Creates structures for input & output
277 std::vector<float> inputData
278 {
279 1.0f, 2.0f, 3.0f, 4.0f, 5.0f
280 };
281
282 // Aligned input
David Monahan3fb7e102019-08-20 11:25:29 +0100283 float* alignedInputData = inputData.data();
David Monahan4f1e8e42019-09-04 09:22:10 +0100284
285 std::vector<float> outputData(5);
286
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100287 // Misaligned output
Aron Virginas-Tard9f7c8b2019-09-13 13:37:03 +0100288 float* misalignedOutputData = reinterpret_cast<float*>(reinterpret_cast<char*>(outputData.data()) + 1);
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100289
290 InputTensors inputTensors
291 {
David Monahan4f1e8e42019-09-04 09:22:10 +0100292 {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), alignedInputData)},
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100293 };
294 OutputTensors outputTensors
295 {
296 {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), misalignedOutputData)}
297 };
298
David Monahan4f1e8e42019-09-04 09:22:10 +0100299 // Do the inference and expect it to fail with a ImportMemoryException
300 BOOST_CHECK_THROW(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryExportException);
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100301}
302
303inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
304{
305 using namespace armnn;
306
307 // Create runtime in which test will run
308 IRuntime::CreationOptions options;
309 IRuntimePtr runtime(armnn::IRuntime::Create(options));
310
311 // build up the structure of the network
312 INetworkPtr net(INetwork::Create());
313
314 IConnectableLayer* input = net->AddInputLayer(0);
315
David Monahan3fb7e102019-08-20 11:25:29 +0100316 ActivationDescriptor descriptor;
317 descriptor.m_Function = ActivationFunction::Square;
318 IConnectableLayer* pooling = net->AddActivationLayer(descriptor);
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100319
320 IConnectableLayer* output = net->AddOutputLayer(0);
321
David Monahan3fb7e102019-08-20 11:25:29 +0100322 input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
323 pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100324
David Monahan3fb7e102019-08-20 11:25:29 +0100325 input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
326 pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100327
328 // Optimize the network
329 IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
David Monahan3fb7e102019-08-20 11:25:29 +0100330 BOOST_CHECK(optNet);
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100331
332 // Loads it into the runtime.
333 NetworkId netId;
David Monahan4f1e8e42019-09-04 09:22:10 +0100334 std::string ignoredErrorMessage;
335 // Enable Importing
336 INetworkProperties networkProperties(true, true);
337 runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100338
339 // Creates structures for input & output
340 std::vector<float> inputData
341 {
342 1.0f, 2.0f, 3.0f, 4.0f
343 };
344
345 std::vector<float> outputData(4);
346
347 InputTensors inputTensors
348 {
349 {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
350 };
351 OutputTensors outputTensors
352 {
353 {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
354 };
355
356 // The result of the inference is not important, just the fact that there
357 // should not be CopyMemGeneric workloads.
358 runtime->GetProfiler(netId)->EnableProfiling(true);
359
360 // Do the inference
361 runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
362
363 // Retrieve the Profiler.Print() output to get the workload execution
364 ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
365 std::stringstream ss;
366 profilerManager.GetProfiler()->Print(ss);;
367 std::string dump = ss.str();
368
David Monahan3fb7e102019-08-20 11:25:29 +0100369 // Contains ActivationWorkload
370 std::size_t found = dump.find("ActivationWorkload");
Ferran Balaguerdcaa6102019-08-21 13:28:38 +0100371 BOOST_TEST(found != std::string::npos);
372 // Contains SyncMemGeneric
373 found = dump.find("SyncMemGeneric");
374 BOOST_TEST(found != std::string::npos);
375 // No contains CopyMemGeneric
376 found = dump.find("CopyMemGeneric");
377 BOOST_TEST(found == std::string::npos);
378}
379
Nattapat Chaimanowong1fcb4ff2019-01-24 15:25:26 +0000380} // anonymous namespace