//
// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <Filesystem.hpp>
#include <LabelsAndEventClasses.hpp>
#include <ProfilingService.hpp>
#include "ProfilingTestUtils.hpp"
#include "PrintPacketHeaderHandler.hpp"
#include <Runtime.hpp>
#include "TestTimelinePacketHandler.hpp"

#include <boost/filesystem.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/unit_test.hpp>

#include <cstdio>
#include <sstream>
#include <sys/stat.h>

using namespace armnn::profiling;
using namespace armnn;

using namespace std::chrono_literals;

class FileOnlyHelperService : public ProfilingService
{
    public:
    // Wait for a notification from the send thread
    bool WaitForPacketsSent(uint32_t timeout = 1000)
    {
        return ProfilingService::WaitForPacketSent(m_ProfilingService, timeout);
    }
    armnn::profiling::ProfilingService m_ProfilingService;
};

BOOST_AUTO_TEST_SUITE(FileOnlyProfilingDecoratorTests)

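// Helper that builds a timestamped capture file name of the form
// YYYY_MM_DD_HH_MM_SS_<epoch>.bin so that repeated test runs do not collide.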
std::string UniqueFileName()
{
    std::time_t t = std::time(nullptr);
    char mbstr[100];
    std::strftime(mbstr, sizeof(mbstr), "%Y_%m_%d_%H_%M_%S_", std::localtime(&t));
    std::stringstream ss;
    ss << mbstr;
    ss << t;
    ss << ".bin";
    return ss.str();
}

BOOST_AUTO_TEST_CASE(TestFileOnlyProfiling)
{
    // This test requires at least one backend registry to be enabled
    // which can execute the simple ElementwiseUnary (Sqrt) network built below
    if (!HasSuitableBackendRegistered())
    {
        return;
    }

    // Create a temporary file name.
    fs::path tempPath = fs::temp_directory_path();
    fs::path tempFile = UniqueFileName();
    tempPath = tempPath / tempFile;
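    // Run the profiling service in file-only mode with timeline events enabled,
    // and register a local packet handler so the test can inspect the timeline
    // packets directly rather than relying on an external profiling connection.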
    armnn::Runtime::CreationOptions creationOptions;
    creationOptions.m_ProfilingOptions.m_EnableProfiling = true;
    creationOptions.m_ProfilingOptions.m_FileOnly = true;
    creationOptions.m_ProfilingOptions.m_CapturePeriod = 100;
    creationOptions.m_ProfilingOptions.m_TimelineEnabled = true;
    ILocalPacketHandlerSharedPtr localPacketHandlerPtr = std::make_shared<TestTimelinePacketHandler>();
    creationOptions.m_ProfilingOptions.m_LocalPacketHandlers.push_back(localPacketHandlerPtr);

    armnn::Runtime runtime(creationOptions);
    // ensure the GUID generator is reset to zero
    GetProfilingService(&runtime).ResetGuidGenerator();

    // Load a simple network
    // build up the structure of the network
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0, "input");

    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Sqrt);
    IConnectableLayer* normalize = net->AddElementwiseUnaryLayer(descriptor, "normalization");

    IConnectableLayer* output = net->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
    normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));

    // optimize the network
    std::vector<armnn::BackendId> backends =
        { armnn::Compute::CpuRef, armnn::Compute::CpuAcc, armnn::Compute::GpuAcc };
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime.GetDeviceSpec());

    // Load it into the runtime. It should succeed.
    armnn::NetworkId netId;
    BOOST_TEST(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);

    // Creates structures for input & output.
    std::vector<float> inputData(16);
    std::vector<float> outputData(16);
    for (unsigned int i = 0; i < 16; ++i)
    {
        inputData[i] = 9.0;
        outputData[i] = 3.0;
    }

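    // Bind the buffers to the network's single input and output (binding id 0).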
    InputTensors inputTensors
    {
        {0, ConstTensor(runtime.GetInputTensorInfo(netId, 0), inputData.data())}
    };
    OutputTensors outputTensors
    {
        {0, Tensor(runtime.GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime.EnqueueWorkload(netId, inputTensors, outputTensors);

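    // Block for up to 3000 ms until the local packet handler reports that the
    // inference timeline has completed.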
    static_cast<TestTimelinePacketHandler*>(localPacketHandlerPtr.get())->WaitOnInferenceCompletion(3000);

    const TimelineModel& model =
        static_cast<TestTimelinePacketHandler*>(localPacketHandlerPtr.get())->GetTimelineModel();

    for (auto& error : model.GetErrors())
    {
        std::cout << error.what() << std::endl;
    }
    BOOST_TEST(model.GetErrors().empty());
    std::vector<std::string> desc = GetModelDescription(model);
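    // Expected textual description of the timeline model: the three layers, the
    // network that owns them, their CpuRef workloads and the inference with its
    // workload executions. The entity/event ids are deterministic because the
    // GUID generator was reset to zero before the network was loaded.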
    std::vector<std::string> expectedOutput;
    expectedOutput.push_back("Entity [0] name = input type = layer");
    expectedOutput.push_back(" connection [14] from entity [0] to entity [1]");
    expectedOutput.push_back(" child: Entity [23] backendId = CpuRef type = workload");
    expectedOutput.push_back("Entity [1] name = normalization type = layer");
    expectedOutput.push_back(" connection [22] from entity [1] to entity [2]");
    expectedOutput.push_back(" child: Entity [15] backendId = CpuRef type = workload");
    expectedOutput.push_back("Entity [2] name = output type = layer");
    expectedOutput.push_back(" child: Entity [27] backendId = CpuRef type = workload");
    expectedOutput.push_back("Entity [6] type = network");
    expectedOutput.push_back(" child: Entity [0] name = input type = layer");
    expectedOutput.push_back(" child: Entity [1] name = normalization type = layer");
    expectedOutput.push_back(" child: Entity [2] name = output type = layer");
    expectedOutput.push_back(" execution: Entity [31] type = inference");
    expectedOutput.push_back("Entity [15] backendId = CpuRef type = workload");
    expectedOutput.push_back(" execution: Entity [44] type = workload_execution");
    expectedOutput.push_back("Entity [23] backendId = CpuRef type = workload");
    expectedOutput.push_back(" execution: Entity [36] type = workload_execution");
    expectedOutput.push_back("Entity [27] backendId = CpuRef type = workload");
    expectedOutput.push_back(" execution: Entity [52] type = workload_execution");
    expectedOutput.push_back("Entity [31] type = inference");
    expectedOutput.push_back(" child: Entity [36] type = workload_execution");
    expectedOutput.push_back(" child: Entity [44] type = workload_execution");
    expectedOutput.push_back(" child: Entity [52] type = workload_execution");
    expectedOutput.push_back(" event: [34] class [start_of_life]");
    expectedOutput.push_back(" event: [60] class [end_of_life]");
    expectedOutput.push_back("Entity [36] type = workload_execution");
    expectedOutput.push_back(" event: [40] class [start_of_life]");
    expectedOutput.push_back(" event: [42] class [end_of_life]");
    expectedOutput.push_back("Entity [44] type = workload_execution");
    expectedOutput.push_back(" event: [48] class [start_of_life]");
    expectedOutput.push_back(" event: [50] class [end_of_life]");
    expectedOutput.push_back("Entity [52] type = workload_execution");
    expectedOutput.push_back(" event: [56] class [start_of_life]");
    expectedOutput.push_back(" event: [58] class [end_of_life]");
    BOOST_TEST(CompareOutput(desc, expectedOutput));
}

BOOST_AUTO_TEST_CASE(DumpOutgoingValidFileEndToEnd)
{
    // This test requires at least one backend registry to be enabled
    // which can execute the simple ElementwiseUnary (Sqrt) network built below
    if (!HasSuitableBackendRegistered())
    {
        return;
    }

    // Create a temporary file name.
    fs::path tempPath = fs::temp_directory_path();
    fs::path tempFile = UniqueFileName();
    tempPath = tempPath / tempFile;
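    // File-only profiling with an outgoing capture file: the binary profiling
    // stream is written to tempPath, which is what the file-size checks at the
    // end of this test rely on.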
    armnn::Runtime::CreationOptions options;
    options.m_ProfilingOptions.m_EnableProfiling = true;
    options.m_ProfilingOptions.m_FileOnly = true;
    options.m_ProfilingOptions.m_IncomingCaptureFile = "";
    options.m_ProfilingOptions.m_OutgoingCaptureFile = tempPath.string();
    options.m_ProfilingOptions.m_CapturePeriod = 100;
    options.m_ProfilingOptions.m_TimelineEnabled = true;

    ILocalPacketHandlerSharedPtr localPacketHandlerPtr = std::make_shared<TestTimelinePacketHandler>();
    options.m_ProfilingOptions.m_LocalPacketHandlers.push_back(localPacketHandlerPtr);

    // Make sure the file does not exist at this point
    BOOST_CHECK(armnnUtils::Filesystem::GetFileSize(tempPath.string().c_str()) == -1);

    armnn::Runtime runtime(options);
    // ensure the GUID generator is reset to zero
    GetProfilingService(&runtime).ResetGuidGenerator();

    // Load a simple network
    // build up the structure of the network
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0, "input");

    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Sqrt);
    IConnectableLayer* normalize = net->AddElementwiseUnaryLayer(descriptor, "normalization");

    IConnectableLayer* output = net->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
    normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));

    // optimize the network
    std::vector<armnn::BackendId> backends =
        { armnn::Compute::CpuRef, armnn::Compute::CpuAcc, armnn::Compute::GpuAcc };
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime.GetDeviceSpec());

    // Load it into the runtime. It should succeed.
    armnn::NetworkId netId;
    BOOST_TEST(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);

    // Creates structures for input & output.
    std::vector<float> inputData(16);
    std::vector<float> outputData(16);
    for (unsigned int i = 0; i < 16; ++i)
    {
        inputData[i] = 9.0;
        outputData[i] = 3.0;
    }

    InputTensors inputTensors
    {
        {0, ConstTensor(runtime.GetInputTensorInfo(netId, 0), inputData.data())}
    };
    OutputTensors outputTensors
    {
        {0, Tensor(runtime.GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime.EnqueueWorkload(netId, inputTensors, outputTensors);

    static_cast<TestTimelinePacketHandler*>(localPacketHandlerPtr.get())->WaitOnInferenceCompletion(3000);

    // In order to flush the files we need to gracefully close the profiling service.
    options.m_ProfilingOptions.m_EnableProfiling = false;
    GetProfilingService(&runtime).ResetExternalProfilingOptions(options.m_ProfilingOptions, true);

    // The output file size should be greater than 0.
    BOOST_CHECK(armnnUtils::Filesystem::GetFileSize(tempPath.string().c_str()) > 0);

    // NOTE: would be an interesting exercise to take this file and decode it

    // Delete the tmp file.
    BOOST_CHECK(armnnUtils::Filesystem::Remove(tempPath.string().c_str()));
}

BOOST_AUTO_TEST_SUITE_END()