//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <test/RuntimeTests.hpp>

#include <LeakChecking.hpp>

#include <backendsCommon/test/RuntimeTestImpl.hpp>
#include <test/ProfilingTestUtils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <doctest/doctest.h>

#ifdef WITH_VALGRIND
#include <valgrind/memcheck.h>
#endif

TEST_SUITE("ClRuntime")
{
TEST_CASE("RuntimeValidateGpuDeviceSupportLayerNoFallback")
{
    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input = net->AddInputLayer(0);
    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

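    // Optimise for GpuAcc only: with no other backend in the list there is no CPU fallback,
    // so every layer of the network must be supportable on the GPU device.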
    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
    CHECK(optNet);

    // Load it into the runtime. It should succeed.
    armnn::NetworkId netId;
    CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
}

#ifdef ARMNN_LEAK_CHECKING_ENABLED
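// Only built when Arm NN's leak checking support (LeakChecking.hpp) is compiled in.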
TEST_CASE("RuntimeMemoryLeaksGpuAcc")
{
    CHECK(ARMNN_LEAK_CHECKER_IS_ACTIVE());
    armnn::IRuntime::CreationOptions options;
    armnn::RuntimeImpl runtime(options);
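    // Pre-reserve the runtime's loaded-networks storage so that the allocation made on the first
    // LoadNetwork call is not counted by the leak checks below (see the comment in RuntimeMemoryUsage).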
    armnn::RuntimeLoadedNetworksReserve(&runtime);

    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    {
        // Do a warmup of this so we make sure that all one-time
        // initialization happens before we do the leak checking.
        CreateAndDropDummyNetwork(backends, runtime);
    }

    {
        ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkGpuAcc");
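        // The scoped checker records heap activity from this point on; the ARMNN_* macros below query it.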
        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
        // In the second run we check for all remaining memory
        // in use after the network was unloaded. If there is any
        // then it will be treated as a memory leak.
        CreateAndDropDummyNetwork(backends, runtime);
        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
        CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
        CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
    }
}
#endif

// Note: this part of the code is due to be removed when we fully trust the gperftools based results.
#if defined(WITH_VALGRIND)
TEST_CASE("RuntimeMemoryUsage")
{
    // From the Valgrind documentation:

    // This means that no pointer to the block can be found. The block is classified as "lost",
    // because the programmer could not possibly have freed it at program exit, since no pointer to it exists.
    unsigned long leakedBefore = 0;
    unsigned long leakedAfter = 0;

    // A start-pointer or chain of start-pointers to the block is found. Since the block is still pointed at,
    // the programmer could, at least in principle, have freed it before program exit.
    // We want to test this in case memory is not freed as early as it could have been.
    unsigned long reachableBefore = 0;
    unsigned long reachableAfter = 0;

    // Needed as out params but we don't test them.
    unsigned long dubious = 0;
    unsigned long suppressed = 0;

    // Ensure that runtime is large enough before checking for memory leaks.
    // Otherwise, when loading the network, it will automatically reserve memory that won't be released
    // until destruction.
    armnn::NetworkId networkIdentifier;
    armnn::IRuntime::CreationOptions options;
    armnn::RuntimeImpl runtime(options);
    armnn::RuntimeLoadedNetworksReserve(&runtime);

    // Check for leaks before we load the network and record them so that we can see the delta after unloading.
    VALGRIND_DO_QUICK_LEAK_CHECK;
    VALGRIND_COUNT_LEAKS(leakedBefore, dubious, reachableBefore, suppressed);

    // build a mock-network and load it into the runtime
    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    {
        armnn::TensorInfo inputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
        armnn::TensorInfo outputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);

        armnn::INetworkPtr mockNetwork(armnn::INetwork::Create());

        armnn::IConnectableLayer* input = mockNetwork->AddInputLayer(0, "input");
        armnn::IConnectableLayer* layer = mockNetwork->AddActivationLayer(armnn::ActivationDescriptor(), "test");
        armnn::IConnectableLayer* output = mockNetwork->AddOutputLayer(0, "output");

        input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
        layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        // Sets the tensors in the network.
        input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

        // optimize the network
        armnn::IOptimizedNetworkPtr optNet = Optimize(*mockNetwork, backends, runtime.GetDeviceSpec());

        runtime.LoadNetwork(networkIdentifier, std::move(optNet));
    }

    runtime.UnloadNetwork(networkIdentifier);

    VALGRIND_DO_ADDED_LEAK_CHECK;
    VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed);

    // If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
    CHECK(leakedBefore == leakedAfter);

    // Allow a reasonable threshold on the reachable bytes counted before and after running Valgrind
    // with the ACL clear cache function.
    // TODO Threshold set to 80k until the root cause of the memory leakage is found and fixed. Revert threshold
    // value to 1024 when fixed.
    CHECK(static_cast<long>(reachableAfter) - static_cast<long>(reachableBefore) < 81920);

    // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
    // so they are assigned to, but still considered unused, causing a warning.
    IgnoreUnused(dubious);
    IgnoreUnused(suppressed);
}
#endif

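// Checks the post-optimisation profiling structure generated for a simple network on the GpuAcc backend.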
TEST_CASE("ProfilingPostOptimisationStructureGpuAcc")
{
    VerifyPostOptimisationStructureTestImpl(armnn::Compute::GpuAcc);
}

}