//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//


#include <Graph.hpp>
#include <Network.hpp>

#include <reference/RefWorkloadFactory.hpp>

#include <boost/test/unit_test.hpp>

BOOST_AUTO_TEST_SUITE(OptimizedNetwork)

BOOST_AUTO_TEST_CASE(SerializeToDot)
{
    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    // Defines layers.
    auto input = net->AddInputLayer(0);
    auto add = net->AddAdditionLayer();
    auto output = net->AddOutputLayer(0);

    // Connects layers.
    input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    armnn::TensorShape shape({4});
    armnn::TensorInfo info(shape, armnn::DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    add->GetOutputSlot(0).SetTensorInfo(info);

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());

    std::ostringstream ss;
    optimizedNet->SerializeToDot(ss);

    auto inputId = input->GetGuid();
    auto addId = add->GetGuid();
    auto outputId = output->GetGuid();

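    // SerializeToDot emits one record-shaped node per layer, keyed by the layer's GUID,
    // and labels each edge with the shape of the tensor carried by that connection.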
    std::stringstream expected;
    expected <<
        "digraph Optimized {\n"
        "    node [shape=\"record\"];\n"
        "    edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
        "    " << inputId << " [label=\"{Input|Guid : " << inputId << "\\lLayerType : Input\\l"
            "BackendID : CpuRef\\l}\"];\n"
        "    " << addId << " [label=\"{Addition|Guid : " << addId << "\\lLayerType : Addition\\l"
            "BackendID : CpuRef\\l}\"];\n"
        "    " << outputId << " [label=\"{Output|Guid : " << outputId << "\\lLayerType : Output\\l"
            "BackendID : CpuRef\\l}\"];\n"
        "    " << inputId << " -> " << addId << " [label=< [4] >];\n"
        "    " << inputId << " -> " << addId << " [label=< [4] >];\n"
        "    " << addId << " -> " << outputId << " [label=< [4] >];\n"
        "}\n";

    BOOST_TEST(ss.str() == expected.str());
}

BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
{
    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input = net->AddInputLayer(0);

    // This layer configuration isn't supported by CpuAcc and there is no fallback backend,
    // so Optimize is expected to fail.
    armnn::NormalizationDescriptor descriptor;
    armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);

    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
    normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    std::vector<std::string> errMessages;

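    // The only requested backend cannot handle this network, so Optimize should throw
    // and report at least one error message.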
    try
    {
        Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
        BOOST_FAIL("Should have thrown an exception.");
    }
    catch (const armnn::InvalidArgumentException& e)
    {
        // Different exceptions are thrown on different backends
    }
    BOOST_CHECK(errMessages.size() > 0);
}

BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerWithFallback)
{
    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input = net->AddInputLayer(0);

    // This layer configuration isn't supported by CpuAcc, but it is allowed to fall back to CpuRef.
    armnn::NormalizationDescriptor descriptor;
    armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);

    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
    normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
    BOOST_REQUIRE(optNet);

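    // Inspect the optimized graph to verify which backend each layer was assigned to.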
    armnn::Graph& graph = GetGraphForTesting(optNet.get());
    graph.AllocateDynamicBuffers();

    for (auto&& layer : graph)
    {
        // If NEON is enabled, Input and Output layers are supported by CpuAcc,
        // the other layers are supported by CpuRef.
        // If NEON is not enabled, all layers are supported by CpuRef.
#if defined(ARMCOMPUTENEON_ENABLED)
        if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
        {
            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
        }
        else if (layer->GetType() == armnn::LayerType::Normalization)
        {
            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
        }
#else
        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
#endif
    }
}

BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
{
    const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);

    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::NormalizationDescriptor nmDesc;
    armnn::ActivationDescriptor acDesc;

    //  in
    //   |
    //  nm
    //  / |
    // ac |
    //  \ |
    //   ml
    //   |
    //   sm
    //   |
    //   ot
    armnn::IConnectableLayer* layer = net->AddInputLayer(0, "in");
    layer->GetOutputSlot(0).SetTensorInfo(desc);

    armnn::IConnectableLayer* const normLayer = net->AddNormalizationLayer(nmDesc, "nm");

    layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
    normLayer->GetOutputSlot(0).SetTensorInfo(desc);

    layer = net->AddActivationLayer(acDesc, "ac");

    normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(desc);

    armnn::IConnectableLayer* prevLayer = layer;
    layer = net->AddMultiplicationLayer("ml");

    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
    layer->GetOutputSlot(0).SetTensorInfo(desc);

    prevLayer = layer;
    armnn::SoftmaxDescriptor softmaxDescriptor;
    layer = net->AddSoftmaxLayer(softmaxDescriptor, "sm");

    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(desc);

    prevLayer = layer;
    layer = net->AddOutputLayer(0, "ot");

    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

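    // Compute::Undefined does not name a real backend, so no layer can be assigned
    // and optimization is expected to fail.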
    std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined };
    std::vector<std::string> errMessages;

    try
    {
        Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
        BOOST_FAIL("Should have thrown an exception.");
    }
    catch (const armnn::InvalidArgumentException& e)
    {
        // Different exceptions are thrown on different backends
    }
    BOOST_CHECK(errMessages.size() > 0);
}

BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback)
{
    const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);

    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::NormalizationDescriptor nmDesc;
    armnn::ActivationDescriptor acDesc;

    //  in
    //   |
    //  nm
    //  / |
    // ac |
    //  \ |
    //   ml
    //   |
    //   sm
    //   |
    //   ot
    armnn::IConnectableLayer* layer = net->AddInputLayer(0, "in");
    layer->GetOutputSlot(0).SetTensorInfo(desc);

    armnn::IConnectableLayer* const normLayer = net->AddNormalizationLayer(nmDesc, "nm");

    layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
    normLayer->GetOutputSlot(0).SetTensorInfo(desc);

    layer = net->AddActivationLayer(acDesc, "ac");

    normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(desc);

    armnn::IConnectableLayer* prevLayer = layer;
    layer = net->AddMultiplicationLayer("ml");

    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
    layer->GetOutputSlot(0).SetTensorInfo(desc);

    prevLayer = layer;
    armnn::SoftmaxDescriptor softmaxDescriptor;
    layer = net->AddSoftmaxLayer(softmaxDescriptor, "sm");

    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(desc);

    prevLayer = layer;
    layer = net->AddOutputLayer(0, "ot");

    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };

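    // Undefined cannot take any layer, so every layer should fall back to CpuRef.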
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
    BOOST_CHECK(optNet);

    armnn::Graph& graph = GetGraphForTesting(optNet.get());
    graph.AllocateDynamicBuffers();

    // validate workloads
    armnn::RefWorkloadFactory fact;
    for (auto&& layer : graph)
    {
        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
        BOOST_CHECK_NO_THROW(
            layer->CreateWorkload(fact));
    }
}

BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback)
{
    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input = net->AddInputLayer(0);

    // This layer configuration isn't supported by CpuAcc, but it is allowed to fall back to CpuRef.
    armnn::NormalizationDescriptor descriptor;
    armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);

    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
    normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

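    // Several preferred backends with CpuRef as the final fallback: the optimizer should
    // place each layer on the first backend in the list that can run it.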
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
                                               armnn::Compute::GpuAcc,
                                               armnn::Compute::CpuRef };

    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
    BOOST_REQUIRE(optNet);

    armnn::Graph& graph = GetGraphForTesting(optNet.get());
    graph.AllocateDynamicBuffers();

    for (auto&& layer : graph)
    {
        // If NEON is enabled, Input and Output layers are supported by CpuAcc,
        // the other layers are supported by CpuRef.
        // If only CL is enabled, Input and Output layers are supported by GpuAcc,
        // the other layers are supported by CpuRef.
        // If neither NEON, nor CL is enabled, all layers are supported by CpuRef.
#if defined(ARMCOMPUTENEON_ENABLED)
        if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
        {
            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
        }
        else if (layer->GetType() == armnn::LayerType::Normalization)
        {
            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
        }
#elif defined(ARMCOMPUTECL_ENABLED)
        if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
        {
            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
        }
        else if (layer->GetType() == armnn::LayerType::Normalization)
        {
            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
        }
#else
        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
#endif
    }
}

BOOST_AUTO_TEST_SUITE_END()