blob: 76f5774a49f05f821f82b791e2170478ea112d27 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include <boost/test/unit_test.hpp>
6
7#include "armnn/TypesUtils.hpp"
8
9#include "armnn/IRuntime.hpp"
10#include "armnn/INetwork.hpp"
11#include "armnn/Descriptors.hpp"
12#include "Runtime.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010013#include "HeapProfiling.hpp"
14#include "LeakChecking.hpp"
telsoa014fcda012018-03-09 14:13:49 +000015
16#ifdef WITH_VALGRIND
17#include "valgrind/memcheck.h"
18#endif
19
namespace armnn
{

// Test hook: pre-reserves the runtime's loaded-network container so that the
// container's own (one-time) allocation does not get attributed to the
// load/unload cycle that the leak-checking tests below measure.
// NOTE(review): this free function reaches into Runtime::m_LoadedNetworks
// directly — presumably it is befriended or the member is accessible from
// this namespace; verify against Runtime.hpp.
void RuntimeLoadedNetworksReserve(armnn::Runtime* runtime)
{
    runtime->m_LoadedNetworks.reserve(1);
}

}
29
30BOOST_AUTO_TEST_SUITE(Runtime)
31
32BOOST_AUTO_TEST_CASE(RuntimeUnloadNetwork)
33{
34 // build 2 mock-networks and load them into the runtime
telsoa01c577f2c2018-08-31 09:22:23 +010035 armnn::IRuntime::CreationOptions options;
36 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
telsoa014fcda012018-03-09 14:13:49 +000037
telsoa01c577f2c2018-08-31 09:22:23 +010038 // Mock network 1.
telsoa014fcda012018-03-09 14:13:49 +000039 armnn::NetworkId networkIdentifier1 = 1;
40 armnn::INetworkPtr mockNetwork1(armnn::INetwork::Create());
41 mockNetwork1->AddInputLayer(0, "test layer");
David Beckf0b48452018-10-19 15:20:56 +010042 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
telsoa01c577f2c2018-08-31 09:22:23 +010043 runtime->LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, backends, runtime->GetDeviceSpec()));
telsoa014fcda012018-03-09 14:13:49 +000044
telsoa01c577f2c2018-08-31 09:22:23 +010045 // Mock network 2.
telsoa014fcda012018-03-09 14:13:49 +000046 armnn::NetworkId networkIdentifier2 = 2;
47 armnn::INetworkPtr mockNetwork2(armnn::INetwork::Create());
48 mockNetwork2->AddInputLayer(0, "test layer");
telsoa01c577f2c2018-08-31 09:22:23 +010049 runtime->LoadNetwork(networkIdentifier2, Optimize(*mockNetwork2, backends, runtime->GetDeviceSpec()));
telsoa014fcda012018-03-09 14:13:49 +000050
telsoa01c577f2c2018-08-31 09:22:23 +010051 // Unloads one by its networkID.
telsoa014fcda012018-03-09 14:13:49 +000052 BOOST_TEST(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Success);
53
54 BOOST_TEST(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Failure);
55}
56
// Note: in the current builds we don't do valgrind- and gperftools-based leak checking at the
// same time, so in practice WITH_VALGRIND and ARMNN_LEAK_CHECKING_ENABLED are exclusive. The
// valgrind tests can stay for x86 builds, but on hikey Valgrind is just way too slow
// to be integrated into the CI system.
surmeh013537c2c2018-05-18 16:31:43 +010061
#ifdef ARMNN_LEAK_CHECKING_ENABLED

// Global Boost.Test fixture, constructed once before any test in this file
// runs. Restricts the leak checker to "local only" mode so that only the
// explicit ARMNN_SCOPED_LEAK_CHECKER scopes below are measured, rather than a
// whole-program check that would flag allocations made by the test framework
// itself.
struct DisableGlobalLeakChecking
{
    DisableGlobalLeakChecking()
    {
        ARMNN_LOCAL_LEAK_CHECKING_ONLY();
    }
};

BOOST_GLOBAL_FIXTURE(DisableGlobalLeakChecking);
73
David Beckf0b48452018-10-19 15:20:56 +010074void CreateAndDropDummyNetwork(const std::vector<armnn::BackendId>& backends, armnn::Runtime& runtime)
surmeh013537c2c2018-05-18 16:31:43 +010075{
76 armnn::NetworkId networkIdentifier;
77 {
78 armnn::TensorInfo inputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
79 armnn::TensorInfo outputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
80
81 armnn::INetworkPtr network(armnn::INetwork::Create());
82
83 armnn::IConnectableLayer* input = network->AddInputLayer(0, "input");
84 armnn::IConnectableLayer* layer = network->AddActivationLayer(armnn::ActivationDescriptor(), "test");
85 armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");
86
87 input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
88 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
89
telsoa01c577f2c2018-08-31 09:22:23 +010090 // Sets the tensors in the network.
surmeh013537c2c2018-05-18 16:31:43 +010091 input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
92 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
93
94 // optimize the network
telsoa01c577f2c2018-08-31 09:22:23 +010095 armnn::IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime.GetDeviceSpec());
surmeh013537c2c2018-05-18 16:31:43 +010096
97 runtime.LoadNetwork(networkIdentifier, std::move(optNet));
98 }
99
100 runtime.UnloadNetwork(networkIdentifier);
101}
102
BOOST_AUTO_TEST_CASE(RuntimeHeapMemoryUsageSanityChecks)
{
    // Verifies the leak checker itself works before the real leak tests rely
    // on it: a live allocation must be reported, a freed one must not.
    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
    {
        ARMNN_SCOPED_LEAK_CHECKER("Sanity_Check_Outer");
        {
            ARMNN_SCOPED_LEAK_CHECKER("Sanity_Check_Inner");
            BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE() == true);
            std::unique_ptr<char[]> dummyAllocation(new char[1000]);
            // dummyAllocation is still live here, so the inner scope must
            // report exactly one leaked object of 1000 bytes.
            BOOST_CHECK_MESSAGE(ARMNN_NO_LEAKS_IN_SCOPE() == false,
                "A leak of 1000 bytes is expected here. "
                "Please make sure environment variable: HEAPCHECK=draconian is set!");
            BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 1000);
            BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 1);
        }
        // dummyAllocation was released when the inner scope ended, so the
        // outer scope must be clean.
        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
    }
}
123
#ifdef ARMCOMPUTECL_ENABLED
BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksGpuAcc)
{
    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());

    armnn::IRuntime::CreationOptions options;
    armnn::Runtime runtime(options);
    // Pre-reserve the loaded-network container so its own allocation is not
    // counted as a leak below.
    armnn::RuntimeLoadedNetworksReserve(&runtime);

    std::vector<armnn::BackendId> backendList = {armnn::Compute::GpuAcc};

    // Warm-up pass: any one-time initialisation happens here, outside the
    // leak-checked scope.
    {
        CreateAndDropDummyNetwork(backendList, runtime);
    }

    {
        ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkGpuAcc");
        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
        // Second pass: anything still allocated after the network has been
        // unloaded is treated as a memory leak.
        CreateAndDropDummyNetwork(backendList, runtime);
        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
    }
}
#endif // ARMCOMPUTECL_ENABLED
152
#ifdef ARMCOMPUTENEON_ENABLED
BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuAcc)
{
    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());

    armnn::IRuntime::CreationOptions options;
    armnn::Runtime runtime(options);
    // Pre-reserve the loaded-network container so its own allocation is not
    // counted as a leak below.
    armnn::RuntimeLoadedNetworksReserve(&runtime);

    std::vector<armnn::BackendId> backendList = {armnn::Compute::CpuAcc};

    // Warm-up pass: any one-time initialisation happens here, outside the
    // leak-checked scope.
    {
        CreateAndDropDummyNetwork(backendList, runtime);
    }

    {
        ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuAcc");
        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
        // Second pass: anything still allocated after the network has been
        // unloaded is treated as a memory leak.
        CreateAndDropDummyNetwork(backendList, runtime);
        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
    }
}
#endif // ARMCOMPUTENEON_ENABLED
181
182BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuRef)
183{
184 BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
185
telsoa01c577f2c2018-08-31 09:22:23 +0100186 armnn::IRuntime::CreationOptions options;
187 armnn::Runtime runtime(options);
surmeh013537c2c2018-05-18 16:31:43 +0100188 armnn::RuntimeLoadedNetworksReserve(&runtime);
189
David Beckf0b48452018-10-19 15:20:56 +0100190 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
surmeh013537c2c2018-05-18 16:31:43 +0100191 {
192 // Do a warmup of this so we make sure that all one-time
193 // initialization happens before we do the leak checking.
telsoa01c577f2c2018-08-31 09:22:23 +0100194 CreateAndDropDummyNetwork(backends, runtime);
surmeh013537c2c2018-05-18 16:31:43 +0100195 }
196
197 {
198 ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuRef");
telsoa01c577f2c2018-08-31 09:22:23 +0100199 BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
surmeh013537c2c2018-05-18 16:31:43 +0100200 // In the second run we check for all remaining memory
201 // in use after the network was unloaded. If there is any
202 // then it will be treated as a memory leak.
telsoa01c577f2c2018-08-31 09:22:23 +0100203 CreateAndDropDummyNetwork(backends, runtime);
surmeh013537c2c2018-05-18 16:31:43 +0100204 BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
205 BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
206 BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
207 }
208}
209
210#endif // ARMNN_LEAK_CHECKING_ENABLED
211
// Note: this part of the code is due to be removed when we fully trust the gperftools based results.
#if defined(ARMCOMPUTECL_ENABLED) && defined(WITH_VALGRIND)
// Loads and unloads a GpuAcc network and uses the Valgrind memcheck client
// requests to confirm the runtime neither leaks memory nor holds on to an
// unreasonable amount of still-reachable memory afterwards.
BOOST_AUTO_TEST_CASE(RuntimeMemoryUsage)
{
    // From documentation:

    // This means that no pointer to the block can be found. The block is classified as "lost",
    // because the programmer could not possibly have freed it at program exit, since no pointer to it exists.
    unsigned long leakedBefore = 0;
    unsigned long leakedAfter = 0;

    // A start-pointer or chain of start-pointers to the block is found. Since the block is still pointed at,
    // the programmer could, at least in principle, have freed it before program exit.
    // We want to test this in case memory is not freed as early as it could have been.
    unsigned long reachableBefore = 0;
    unsigned long reachableAfter = 0;

    // Needed as out params but we don't test them.
    unsigned long dubious = 0;
    unsigned long suppressed = 0;

    // Ensure that runtime is large enough before checking for memory leaks.
    // Otherwise, when loading the network, it will automatically reserve memory that won't be released
    // until destruction.
    armnn::NetworkId networkIdentifier;
    armnn::IRuntime::CreationOptions options;
    armnn::Runtime runtime(options);
    armnn::RuntimeLoadedNetworksReserve(&runtime);

    // Checks for leaks before we load the network and record them so that we can see the delta after unloading.
    VALGRIND_DO_QUICK_LEAK_CHECK;
    VALGRIND_COUNT_LEAKS(leakedBefore, dubious, reachableBefore, suppressed);

    // build a mock-network and load it into the runtime
    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    {
        armnn::TensorInfo inputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
        armnn::TensorInfo outputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);

        armnn::INetworkPtr mockNetwork(armnn::INetwork::Create());

        armnn::IConnectableLayer* input = mockNetwork->AddInputLayer(0, "input");
        armnn::IConnectableLayer* layer = mockNetwork->AddActivationLayer(armnn::ActivationDescriptor(), "test");
        armnn::IConnectableLayer* output = mockNetwork->AddOutputLayer(0, "output");

        input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
        layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        // Sets the tensors in the network.
        input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

        // optimize the network
        armnn::IOptimizedNetworkPtr optNet = Optimize(*mockNetwork, backends, runtime.GetDeviceSpec());

        runtime.LoadNetwork(networkIdentifier, std::move(optNet));
    }

    runtime.UnloadNetwork(networkIdentifier);

    // Count leaks again now that the network is gone; only the delta matters.
    VALGRIND_DO_ADDED_LEAK_CHECK;
    VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed);

    // If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
    BOOST_TEST(leakedBefore == leakedAfter);

    // Add reasonable threshold after and before running valgrind with the ACL clear cache function.
    // TODO Threshold set to 80k until the root cause of the memory leakage is found and fixed. Revert threshold
    // value to 1024 when fixed.
    // Signed casts guard against unsigned wrap-around when "after" < "before".
    BOOST_TEST(static_cast<long>(reachableAfter) - static_cast<long>(reachableBefore) < 81920);

    // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
    // so they are assigned to, but still considered unused, causing a warning.
    boost::ignore_unused(dubious);
    boost::ignore_unused(suppressed);
}
#endif
289
// Note: this part of the code is due to be removed when we fully trust the gperftools based results.
#ifdef WITH_VALGRIND
// Run with the following command to get all the amazing output (in the devenv/build folder) :)
// valgrind --leak-check=full --show-leak-kinds=all --log-file=Valgrind_Memcheck_Leak_Report.txt armnn/test/UnitTests
BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
{
    // Loads and unloads a trivial CpuRef network and uses the Valgrind
    // memcheck client requests to verify the runtime does not leak and does
    // not keep memory reachable longer than necessary.

    // From documentation:

    // This means that no pointer to the block can be found. The block is classified as "lost",
    // because the programmer could not possibly have freed it at program exit, since no pointer to it exists.
    unsigned long leakedBefore = 0;
    unsigned long leakedAfter = 0;

    // A start-pointer or chain of start-pointers to the block is found. Since the block is still pointed at,
    // the programmer could, at least in principle, have freed it before program exit.
    // We want to test this in case memory is not freed as early as it could have been.
    unsigned long reachableBefore = 0;
    unsigned long reachableAfter = 0;

    // Needed as out params but we don't test them.
    unsigned long dubious = 0;
    unsigned long suppressed = 0;

    armnn::NetworkId networkIdentifier1 = 1;

    // Ensure that runtime is large enough before checking for memory leaks;
    // otherwise, when loading the network, it will automatically reserve
    // memory that won't be released until destruction and would skew the counts.
    armnn::IRuntime::CreationOptions options;
    armnn::Runtime runtime(options);
    armnn::RuntimeLoadedNetworksReserve(&runtime);

    // Checks for leaks before we load the network and record them so that we can see the delta after unloading.
    VALGRIND_DO_QUICK_LEAK_CHECK;
    VALGRIND_COUNT_LEAKS(leakedBefore, dubious, reachableBefore, suppressed);

    // Builds a mock-network and loads it into the runtime.
    // (The previously declared inputShape/inputTensorInfo locals were unused and have been removed.)
    {
        std::unique_ptr<armnn::Network> mockNetwork1 = std::make_unique<armnn::Network>();
        mockNetwork1->AddInputLayer(0, "test layer");

        std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
        runtime.LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, backends, runtime.GetDeviceSpec()));
    }

    runtime.UnloadNetwork(networkIdentifier1);

    VALGRIND_DO_ADDED_LEAK_CHECK;
    VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed);

    // If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
    BOOST_TEST(leakedBefore == leakedAfter);

    #if defined(ARMCOMPUTECL_ENABLED)
    // reachableBefore == reachableAfter should hold, but on OpenCL with Android we are still
    // not entirely able to control the memory in the OpenCL driver. Testing is showing that
    // after this test (which clears all OpenCL memory) we are clearing a little bit more than
    // we expect, probably depending on the order in which other tests are run.
    // Compare as signed values (matching the style of RuntimeMemoryUsage): the
    // counters are unsigned, so a raw subtraction would wrap around to a huge
    // value if reachableAfter ever exceeded reachableBefore, producing a
    // misleading assertion message.
    BOOST_TEST(static_cast<long>(reachableBefore) - static_cast<long>(reachableAfter) <= 24);
    #else
    BOOST_TEST(reachableBefore == reachableAfter);
    #endif

    BOOST_TEST(reachableBefore >= reachableAfter);

    // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
    // so they are assigned to, but still considered unused, causing a warning.
    boost::ignore_unused(dubious);
    boost::ignore_unused(suppressed);
}
#endif
364
#if ARMCOMPUTENEON_ENABLED
BOOST_AUTO_TEST_CASE(RuntimeValidateCpuAccDeviceSupportLayerNoFallback)
{
    // A pass-through network: input wired straight to output.
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* inputLayer  = net->AddInputLayer(0);
    armnn::IConnectableLayer* outputLayer = net->AddOutputLayer(0);

    inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
    inputLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Optimizing for CpuAcc alone must succeed without needing a fallback backend.
    std::vector<armnn::BackendId> backendList = { armnn::Compute::CpuAcc };
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backendList, runtime->GetDeviceSpec());
    BOOST_CHECK(optNet);

    // Loading the optimized network into the runtime should succeed.
    armnn::NetworkId netId;
    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
}
#endif // ARMCOMPUTENEON_ENABLED
391
#if ARMCOMPUTECL_ENABLED
BOOST_AUTO_TEST_CASE(RuntimeValidateGpuDeviceSupportLayerNoFallback)
{
    // A pass-through network: input wired straight to output.
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* inputLayer  = net->AddInputLayer(0);
    armnn::IConnectableLayer* outputLayer = net->AddOutputLayer(0);

    inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
    inputLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Optimizing for GpuAcc alone must succeed without needing a fallback backend.
    std::vector<armnn::BackendId> backendList = { armnn::Compute::GpuAcc };
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backendList, runtime->GetDeviceSpec());
    BOOST_CHECK(optNet);

    // Loading the optimized network into the runtime should succeed.
    armnn::NetworkId netId;
    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
}
#endif // ARMCOMPUTECL_ENABLED
418
419BOOST_AUTO_TEST_CASE(RuntimeCpuRef)
420{
421 using namespace armnn;
422
423 // Create runtime in which test will run
424 armnn::IRuntime::CreationOptions options;
425 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
426
427 // build up the structure of the network
428 INetworkPtr net(INetwork::Create());
429
430 IConnectableLayer* input = net->AddInputLayer(0);
431
432 // This layer configuration isn't supported by CpuAcc, should be fall back to CpuRef.
433 NormalizationDescriptor descriptor;
434 IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
435
436 IConnectableLayer* output = net->AddOutputLayer(0);
437
438 input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
439 normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
440
441 input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
442 normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
443
444 // optimize the network
David Beckf0b48452018-10-19 15:20:56 +0100445 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
telsoa01c577f2c2018-08-31 09:22:23 +0100446 IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
447
448 // Load it into the runtime. It should success.
449 armnn::NetworkId netId;
450 BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
451}
452
453BOOST_AUTO_TEST_CASE(RuntimeFallbackToCpuRef)
454{
455 using namespace armnn;
456
457 // Create runtime in which test will run
458 armnn::IRuntime::CreationOptions options;
459 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
460
461 // build up the structure of the network
462 INetworkPtr net(INetwork::Create());
463
464 IConnectableLayer* input = net->AddInputLayer(0);
465
466 // This layer configuration isn't supported by CpuAcc, should be fall back to CpuRef.
467 NormalizationDescriptor descriptor;
468 IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
469
470 IConnectableLayer* output = net->AddOutputLayer(0);
471
472 input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
473 normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
474
475 input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
476 normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
477
478 // Allow fallback to CpuRef.
David Beckf0b48452018-10-19 15:20:56 +0100479 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
telsoa01c577f2c2018-08-31 09:22:23 +0100480 // optimize the network
481 IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
482
483 // Load it into the runtime. It should succeed.
484 armnn::NetworkId netId;
485 BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
486}
487
jimfly016b0b53d2018-10-08 14:43:01 +0100488BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
489{
490 // Test for issue reported by Chris Nix in https://jira.arm.com/browse/IVGCVSW-1929
491 using namespace armnn;
492
493 // Create runtime in which test will run
494 armnn::IRuntime::CreationOptions options;
495 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
496
497 // build up the structure of the network
498 INetworkPtr net(INetwork::Create());
499 armnn::IConnectableLayer* input = net->AddInputLayer(
500 0,
501 "input"
502 );
503 armnn::IConnectableLayer* softmax = net->AddSoftmaxLayer(
504 armnn::SoftmaxDescriptor(),
505 "softmax"
506 );
507 armnn::IConnectableLayer* output = net->AddOutputLayer(
508 0,
509 "output"
510 );
511
512 input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
513 softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
514
515 input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
516 armnn::TensorShape({ 1, 5 }),
517 armnn::DataType::QuantisedAsymm8,
518 1.0f/255,
519 0
520 ));
521
522 softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
523 armnn::TensorShape({ 1, 5 }),
524 armnn::DataType::QuantisedAsymm8
525 ));
526
David Beckf0b48452018-10-19 15:20:56 +0100527 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
jimfly016b0b53d2018-10-08 14:43:01 +0100528 std::vector<std::string> errMessages;
529 armnn::IOptimizedNetworkPtr optNet = Optimize(
530 *net,
531 backends,
532 runtime->GetDeviceSpec(),
533 OptimizerOptions(),
534 errMessages
535 );
David Beckf0b48452018-10-19 15:20:56 +0100536
jimfly016b0b53d2018-10-08 14:43:01 +0100537 BOOST_TEST(errMessages.size() == 1);
538 BOOST_TEST(errMessages[0] ==
539 "ERROR: output 0 of layer Softmax (softmax) is of type "
540 "Quantized 8 bit but its scale parameter has not been set");
541 BOOST_TEST(!optNet);
542}
543
telsoa014fcda012018-03-09 14:13:49 +0000544BOOST_AUTO_TEST_SUITE_END()