blob: 8a4e85ee238cad532991571090135f0240524db9 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
telsoa014fcda012018-03-09 14:13:49 +00005
Aron Virginas-Tarc26ba752018-10-22 13:32:01 +01006#include <armnn/Descriptors.hpp>
7#include <armnn/IRuntime.hpp>
8#include <armnn/INetwork.hpp>
9#include <armnn/Runtime.hpp>
10#include <armnn/TypesUtils.hpp>
telsoa014fcda012018-03-09 14:13:49 +000011
Aron Virginas-Tarc26ba752018-10-22 13:32:01 +010012#include <armnnUtils/HeapProfiling.hpp>
13#include <armnnUtils/LeakChecking.hpp>
telsoa014fcda012018-03-09 14:13:49 +000014
15#ifdef WITH_VALGRIND
Aron Virginas-Tarc26ba752018-10-22 13:32:01 +010016#include <valgrind/memcheck.h>
telsoa014fcda012018-03-09 14:13:49 +000017#endif
18
Aron Virginas-Tarc26ba752018-10-22 13:32:01 +010019#include <boost/test/unit_test.hpp>
20
namespace armnn
{

// Test-only hook: pre-reserves capacity in the runtime's internal loaded-network map so
// that loading a network later does not trigger an allocation which the memory-leak
// tests below would otherwise (mis)report as a delta.
// NOTE(review): this touches the private m_LoadedNetworks member, so it is presumably
// declared a friend of armnn::Runtime — confirm against Runtime.hpp.
void RuntimeLoadedNetworksReserve(armnn::Runtime* runtime)
{
    runtime->m_LoadedNetworks.reserve(1);
}

}
30
31BOOST_AUTO_TEST_SUITE(Runtime)
32
33BOOST_AUTO_TEST_CASE(RuntimeUnloadNetwork)
34{
35 // build 2 mock-networks and load them into the runtime
telsoa01c577f2c2018-08-31 09:22:23 +010036 armnn::IRuntime::CreationOptions options;
37 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
telsoa014fcda012018-03-09 14:13:49 +000038
telsoa01c577f2c2018-08-31 09:22:23 +010039 // Mock network 1.
telsoa014fcda012018-03-09 14:13:49 +000040 armnn::NetworkId networkIdentifier1 = 1;
41 armnn::INetworkPtr mockNetwork1(armnn::INetwork::Create());
42 mockNetwork1->AddInputLayer(0, "test layer");
David Beckf0b48452018-10-19 15:20:56 +010043 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
telsoa01c577f2c2018-08-31 09:22:23 +010044 runtime->LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, backends, runtime->GetDeviceSpec()));
telsoa014fcda012018-03-09 14:13:49 +000045
telsoa01c577f2c2018-08-31 09:22:23 +010046 // Mock network 2.
telsoa014fcda012018-03-09 14:13:49 +000047 armnn::NetworkId networkIdentifier2 = 2;
48 armnn::INetworkPtr mockNetwork2(armnn::INetwork::Create());
49 mockNetwork2->AddInputLayer(0, "test layer");
telsoa01c577f2c2018-08-31 09:22:23 +010050 runtime->LoadNetwork(networkIdentifier2, Optimize(*mockNetwork2, backends, runtime->GetDeviceSpec()));
telsoa014fcda012018-03-09 14:13:49 +000051
telsoa01c577f2c2018-08-31 09:22:23 +010052 // Unloads one by its networkID.
telsoa014fcda012018-03-09 14:13:49 +000053 BOOST_TEST(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Success);
54
55 BOOST_TEST(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Failure);
56}
57
// Note: in the current builds we don't do valgrind and gperftools based leak checking at the
// same time, so in practice WITH_VALGRIND and ARMNN_LEAK_CHECKING_ENABLED are mutually
// exclusive. The valgrind tests can stay for x86 builds, but on HiKey Valgrind is just way
// too slow to be integrated into the CI system.
surmeh013537c2c2018-05-18 16:31:43 +010062
telsoa01c577f2c2018-08-31 09:22:23 +010063#ifdef ARMNN_LEAK_CHECKING_ENABLED
64
// Boost global fixture: restricts the gperftools-based leak checking to the explicitly
// scoped checkers used in the test below, rather than a whole-program check.
struct DisableGlobalLeakChecking
{
    DisableGlobalLeakChecking()
    {
        ARMNN_LOCAL_LEAK_CHECKING_ONLY();
    }
};

// Registered globally so it runs once before any test case in this translation unit.
BOOST_GLOBAL_FIXTURE(DisableGlobalLeakChecking);
74
// Sanity-checks the leak checker itself: an allocation that is still live inside a
// scoped checker must be reported as a leak there, while the enclosing scope — entered
// after the allocation has been released — must report nothing.
BOOST_AUTO_TEST_CASE(RuntimeHeapMemoryUsageSanityChecks)
{
    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
    {
        ARMNN_SCOPED_LEAK_CHECKER("Sanity_Check_Outer");
        {
            ARMNN_SCOPED_LEAK_CHECKER("Sanity_Check_Inner");
            BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE() == true);
            // Deliberate 1000-byte allocation that stays live until the end of this
            // inner scope, so the inner checker must count it as leaked here.
            std::unique_ptr<char[]> dummyAllocation(new char[1000]);
            BOOST_CHECK_MESSAGE(ARMNN_NO_LEAKS_IN_SCOPE() == false,
                "A leak of 1000 bytes is expected here. "
                "Please make sure environment variable: HEAPCHECK=draconian is set!");
            BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 1000);
            BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 1);
        }
        // dummyAllocation was freed when the inner scope ended, so the outer scope
        // must be clean.
        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
    }
}
95
surmeh013537c2c2018-05-18 16:31:43 +010096#endif // ARMNN_LEAK_CHECKING_ENABLED
97
98// Note: this part of the code is due to be removed when we fully trust the gperftools based results.
telsoa014fcda012018-03-09 14:13:49 +000099#ifdef WITH_VALGRIND
telsoa01c577f2c2018-08-31 09:22:23 +0100100// Run with the following command to get all the amazing output (in the devenv/build folder) :)
telsoa014fcda012018-03-09 14:13:49 +0000101// valgrind --leak-check=full --show-leak-kinds=all --log-file=Valgrind_Memcheck_Leak_Report.txt armnn/test/UnitTests
102BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
103{
104 // From documentation:
105
106 // This means that no pointer to the block can be found. The block is classified as "lost",
107 // because the programmer could not possibly have freed it at program exit, since no pointer to it exists.
108 unsigned long leakedBefore = 0;
109 unsigned long leakedAfter = 0;
110
111 // A start-pointer or chain of start-pointers to the block is found. Since the block is still pointed at,
112 // the programmer could, at least in principle, have freed it before program exit.
telsoa01c577f2c2018-08-31 09:22:23 +0100113 // We want to test this in case memory is not freed as early as it could have been.
telsoa014fcda012018-03-09 14:13:49 +0000114 unsigned long reachableBefore = 0;
115 unsigned long reachableAfter = 0;
116
telsoa01c577f2c2018-08-31 09:22:23 +0100117 // Needed as out params but we don't test them.
telsoa014fcda012018-03-09 14:13:49 +0000118 unsigned long dubious = 0;
119 unsigned long suppressed = 0;
120
121 armnn::NetworkId networkIdentifier1 = 1;
122
123 // ensure that runtime is large enough before checking for memory leaks
124 // otherwise when loading the network it will automatically reserve memory that won't be released until destruction
telsoa01c577f2c2018-08-31 09:22:23 +0100125 armnn::IRuntime::CreationOptions options;
126 armnn::Runtime runtime(options);
telsoa014fcda012018-03-09 14:13:49 +0000127 armnn::RuntimeLoadedNetworksReserve(&runtime);
128
telsoa01c577f2c2018-08-31 09:22:23 +0100129 // Checks for leaks before we load the network and record them so that we can see the delta after unloading.
telsoa014fcda012018-03-09 14:13:49 +0000130 VALGRIND_DO_QUICK_LEAK_CHECK;
131 VALGRIND_COUNT_LEAKS(leakedBefore, dubious, reachableBefore, suppressed);
132
telsoa01c577f2c2018-08-31 09:22:23 +0100133 // Builds a mock-network and load it into the runtime.
telsoa014fcda012018-03-09 14:13:49 +0000134 {
135 unsigned int inputShape[] = {1, 7, 1, 1};
136 armnn::TensorInfo inputTensorInfo(4, inputShape, armnn::DataType::Float32);
137
138 std::unique_ptr<armnn::Network> mockNetwork1 = std::make_unique<armnn::Network>();
139 mockNetwork1->AddInputLayer(0, "test layer");
140
telsoa014fcda012018-03-09 14:13:49 +0000141
David Beckf0b48452018-10-19 15:20:56 +0100142 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
telsoa01c577f2c2018-08-31 09:22:23 +0100143 runtime.LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, backends, runtime.GetDeviceSpec()));
telsoa014fcda012018-03-09 14:13:49 +0000144 }
145
146 runtime.UnloadNetwork(networkIdentifier1);
147
148 VALGRIND_DO_ADDED_LEAK_CHECK;
149 VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed);
150
telsoa01c577f2c2018-08-31 09:22:23 +0100151 // If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
Aron Virginas-Tarc26ba752018-10-22 13:32:01 +0100152 BOOST_TEST(leakedBefore == leakedAfter);
153 BOOST_TEST(reachableBefore == reachableAfter);
telsoa014fcda012018-03-09 14:13:49 +0000154
telsoa01c577f2c2018-08-31 09:22:23 +0100155 // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
156 // so they are assigned to, but still considered unused, causing a warning.
telsoa014fcda012018-03-09 14:13:49 +0000157 boost::ignore_unused(dubious);
158 boost::ignore_unused(suppressed);
159}
Aron Virginas-Tarc26ba752018-10-22 13:32:01 +0100160#endif // WITH_VALGRIND
telsoa01c577f2c2018-08-31 09:22:23 +0100161
162BOOST_AUTO_TEST_CASE(RuntimeCpuRef)
163{
164 using namespace armnn;
165
166 // Create runtime in which test will run
167 armnn::IRuntime::CreationOptions options;
168 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
169
170 // build up the structure of the network
171 INetworkPtr net(INetwork::Create());
172
173 IConnectableLayer* input = net->AddInputLayer(0);
174
175 // This layer configuration isn't supported by CpuAcc, should be fall back to CpuRef.
176 NormalizationDescriptor descriptor;
177 IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
178
179 IConnectableLayer* output = net->AddOutputLayer(0);
180
181 input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
182 normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
183
184 input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
185 normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
186
187 // optimize the network
David Beckf0b48452018-10-19 15:20:56 +0100188 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
telsoa01c577f2c2018-08-31 09:22:23 +0100189 IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
190
191 // Load it into the runtime. It should success.
192 armnn::NetworkId netId;
193 BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
194}
195
196BOOST_AUTO_TEST_CASE(RuntimeFallbackToCpuRef)
197{
198 using namespace armnn;
199
200 // Create runtime in which test will run
201 armnn::IRuntime::CreationOptions options;
202 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
203
204 // build up the structure of the network
205 INetworkPtr net(INetwork::Create());
206
207 IConnectableLayer* input = net->AddInputLayer(0);
208
209 // This layer configuration isn't supported by CpuAcc, should be fall back to CpuRef.
210 NormalizationDescriptor descriptor;
211 IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
212
213 IConnectableLayer* output = net->AddOutputLayer(0);
214
215 input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
216 normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
217
218 input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
219 normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
220
221 // Allow fallback to CpuRef.
David Beckf0b48452018-10-19 15:20:56 +0100222 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
telsoa01c577f2c2018-08-31 09:22:23 +0100223 // optimize the network
224 IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
225
226 // Load it into the runtime. It should succeed.
227 armnn::NetworkId netId;
228 BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
229}
230
jimfly016b0b53d2018-10-08 14:43:01 +0100231BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
232{
233 // Test for issue reported by Chris Nix in https://jira.arm.com/browse/IVGCVSW-1929
234 using namespace armnn;
235
236 // Create runtime in which test will run
237 armnn::IRuntime::CreationOptions options;
238 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
239
240 // build up the structure of the network
241 INetworkPtr net(INetwork::Create());
242 armnn::IConnectableLayer* input = net->AddInputLayer(
243 0,
244 "input"
245 );
246 armnn::IConnectableLayer* softmax = net->AddSoftmaxLayer(
247 armnn::SoftmaxDescriptor(),
248 "softmax"
249 );
250 armnn::IConnectableLayer* output = net->AddOutputLayer(
251 0,
252 "output"
253 );
254
255 input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
256 softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
257
258 input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
259 armnn::TensorShape({ 1, 5 }),
260 armnn::DataType::QuantisedAsymm8,
261 1.0f/255,
262 0
263 ));
264
265 softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
266 armnn::TensorShape({ 1, 5 }),
267 armnn::DataType::QuantisedAsymm8
268 ));
269
David Beckf0b48452018-10-19 15:20:56 +0100270 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
jimfly016b0b53d2018-10-08 14:43:01 +0100271 std::vector<std::string> errMessages;
272 armnn::IOptimizedNetworkPtr optNet = Optimize(
273 *net,
274 backends,
275 runtime->GetDeviceSpec(),
276 OptimizerOptions(),
277 errMessages
278 );
David Beckf0b48452018-10-19 15:20:56 +0100279
jimfly016b0b53d2018-10-08 14:43:01 +0100280 BOOST_TEST(errMessages.size() == 1);
281 BOOST_TEST(errMessages[0] ==
282 "ERROR: output 0 of layer Softmax (softmax) is of type "
283 "Quantized 8 bit but its scale parameter has not been set");
284 BOOST_TEST(!optNet);
285}
286
telsoa014fcda012018-03-09 14:13:49 +0000287BOOST_AUTO_TEST_SUITE_END()