//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
#include <boost/test/unit_test.hpp>

#include "armnn/ArmNN.hpp"
#include "Graph.hpp"
#include "Layer.hpp"
#include "Layers.hpp"
#include "armnn/TypesUtils.hpp"
#include "armnn/Exceptions.hpp"

#include "GraphUtils.hpp"
#include "backends/CpuTensorHandle.hpp"

#include <boost/cast.hpp>

#include <algorithm>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

/// Checks that layer "first" comes before layer "second" in the graph's topological order.
bool CheckOrder(const armnn::Graph& graph, const armnn::Layer* first, const armnn::Layer* second)
{
    graph.Print();

    const auto& order = graph.TopologicalSort();

    auto firstPos = std::find(order.begin(), order.end(), first);
    auto secondPos = std::find(firstPos, order.end(), second);

    return (secondPos != order.end());
}

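// Returns the first layer in the graph whose name matches the given string, or nullptr if no such layer exists.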
static armnn::Layer* GetFirstLayerWithName(armnn::Graph& graph, const std::string& name)
{
    for (auto&& layer : graph)
    {
        if (layer->GetNameStr() == name)
        {
            return layer;
        }
    }
    return nullptr;
}

BOOST_AUTO_TEST_SUITE(Graph)

BOOST_AUTO_TEST_CASE(ClassGraph)
{
    armnn::Graph graph;
    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
    BOOST_TEST(GraphHasNamedLayer(graph, "layerA"));
}

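// Builds a small graph that branches and rejoins, then verifies that TopologicalSort() respects every edge.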
BOOST_AUTO_TEST_CASE(TopologicalSort)
{
    armnn::Graph graph;

    armnn::ActivationDescriptor activationDefaults;

    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerC"));
    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerD"));
    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerE"));

    armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
    armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
    armnn::Layer* const layerC = GetFirstLayerWithName(graph, "layerC");
    armnn::Layer* const layerO = GetFirstLayerWithName(graph, "output");
    armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
    armnn::Layer* const layerD = GetFirstLayerWithName(graph, "layerD");

    // simple graph which branches and rejoins
    //    A
    //   / \'
    //  D   E
    //   \  |
    //    \ B
    //     \|
    //      C
    layerA->GetOutputSlot(0).Connect(layerD->GetInputSlot(0));
    layerA->GetOutputSlot(0).Connect(layerE->GetInputSlot(0));
    layerE->GetOutputSlot(0).Connect(layerB->GetInputSlot(0));
    layerD->GetOutputSlot(0).Connect(layerC->GetInputSlot(0));
    layerB->GetOutputSlot(0).Connect(layerC->GetInputSlot(1));
    layerC->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));

    // check order is valid
    BOOST_TEST(CheckOrder(graph, layerA, layerD));
    BOOST_TEST(CheckOrder(graph, layerA, layerE));
    BOOST_TEST(CheckOrder(graph, layerD, layerC));
    BOOST_TEST(CheckOrder(graph, layerE, layerB));
    BOOST_TEST(CheckOrder(graph, layerB, layerC));
}

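// Verifies that InsertNewLayer() splices new layers into existing connections while keeping the topological
// order valid after each insertion.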
BOOST_AUTO_TEST_CASE(InsertNewLayer)
{
    armnn::Graph graph;
    armnn::TensorInfo tensorInfo({ 1, 1, 1, 1 }, armnn::DataType::Float32);

    std::vector<armnn::Layer*> order;

    armnn::ActivationDescriptor activationDefaults;
    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));

    armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
    armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
    armnn::Layer* const layerC = GetFirstLayerWithName(graph, "layerC");
    armnn::Layer* const layerD = GetFirstLayerWithName(graph, "layerD");
    armnn::Layer* const layerO = GetFirstLayerWithName(graph, "output");

    //   A
    //  / \'
    // B   C
    //  \ /
    //   D
    layerA->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    layerB->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    layerC->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    layerD->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    layerA->GetOutputSlot(0).Connect(layerB->GetInputSlot(0));
    layerA->GetOutputSlot(0).Connect(layerC->GetInputSlot(0));
    layerB->GetOutputSlot(0).Connect(layerD->GetInputSlot(0));
    layerC->GetOutputSlot(0).Connect(layerD->GetInputSlot(1));
    layerD->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));

    // check order is valid
    BOOST_TEST(CheckOrder(graph, layerA, layerB));
    BOOST_TEST(CheckOrder(graph, layerA, layerC));
    BOOST_TEST(CheckOrder(graph, layerB, layerD));
    BOOST_TEST(CheckOrder(graph, layerC, layerD));

    //   A
    //  / \'
    // B   C
    //  \  |
    //   \ E
    //    \|
    //     D
    BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerD->GetInputSlot(1),
                                                                      activationDefaults,
                                                                      "layerE"));

    armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");

    // check order is valid
    BOOST_TEST(CheckOrder(graph, layerA, layerB));
    BOOST_TEST(CheckOrder(graph, layerA, layerC));
    BOOST_TEST(CheckOrder(graph, layerB, layerD));
    BOOST_TEST(CheckOrder(graph, layerC, layerE));
    BOOST_TEST(CheckOrder(graph, layerE, layerD));

    //     A
    //    /|
    //   / F
    //  /  |
    // B   C
    //  \  |
    //   \ E
    //    \|
    //     D
    BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetInputSlot(0),
                                                                      activationDefaults,
                                                                      "layerF"));

    armnn::Layer* const layerF = GetFirstLayerWithName(graph, "layerF");

    // check order is valid
    BOOST_TEST(CheckOrder(graph, layerA, layerB));
    BOOST_TEST(CheckOrder(graph, layerA, layerF));
    BOOST_TEST(CheckOrder(graph, layerF, layerC));
    BOOST_TEST(CheckOrder(graph, layerB, layerD));
    BOOST_TEST(CheckOrder(graph, layerC, layerE));
    BOOST_TEST(CheckOrder(graph, layerE, layerD));
}

namespace
{
    using Edge = std::pair<const armnn::Layer*, const armnn::Layer*>;
}

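// Collects every (source layer, destination layer) pair in the graph by walking each connection of each
// output slot of each layer.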
static std::vector<Edge> GetEdgeList(const armnn::Graph& graph)
{
    std::vector<Edge> edges;

    for (auto&& srcLayer: graph)
    {
        const unsigned int numOutputSlots = srcLayer->GetNumOutputSlots();
        for (unsigned int s = 0; s < numOutputSlots; ++s)
        {
            const armnn::IOutputSlot& outputSlot = srcLayer->GetOutputSlot(s);
            const unsigned int numConnections = outputSlot.GetNumConnections();
            for (unsigned int c = 0; c < numConnections; ++c)
            {
                auto inputSlot = boost::polymorphic_downcast<const armnn::InputSlot*>(outputSlot.GetConnection(c));
                edges.emplace_back(srcLayer, &inputSlot->GetOwningLayer());
            }
        }
    }

    return edges;
}

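// Checks that a graph produced by AddCopyLayers() is consistent with the original graph:
// - the new graph must not contain duplicate edges;
// - every edge that also exists in the original graph must connect two layers on the same compute device;
// - every new edge must join an original layer to a copy layer, and the copy layer's adjacent edges must map
//   back to an original edge between layers on different compute devices;
// - once all new edges are accounted for, no original edge may be left unmatched.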
static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armnn::Graph& origGraph)
{
    std::vector<Edge> origEdges = GetEdgeList(origGraph);
    std::vector<Edge> newEdges = GetEdgeList(graph);

    // Adding copy layers should not produce any duplicate edges
    {
        std::vector<Edge> sortedNewEdges = newEdges;
        std::sort(sortedNewEdges.begin(), sortedNewEdges.end());

        auto last = std::unique(sortedNewEdges.begin(), sortedNewEdges.end());
        BOOST_CHECK_MESSAGE(last == sortedNewEdges.end(), "New graph contains duplicate edges!");
    }

    // Each new edge must be tested
    while (!newEdges.empty())
    {
        const Edge edge = std::move(newEdges.back());
        newEdges.pop_back();

        // Edge present in the original graph?
        int originalEdge = -1;
        for (unsigned int i = 0; i < origEdges.size(); i++)
        {
            const Edge& origEdge = origEdges[i];
            if (origEdge.first->GetNameStr() == edge.first->GetNameStr() &&
                origEdge.second->GetNameStr() == edge.second->GetNameStr())
            {
                originalEdge = boost::numeric_cast<int>(i);
            }
        }

        if (originalEdge != -1)
        {
            // Each vertex should correspond to a layer.
            const armnn::Layer* srcLayer = edge.first;
            const armnn::Layer* dstLayer = edge.second;
            BOOST_TEST(srcLayer);
            BOOST_TEST(dstLayer);

            // Both layers must have the same compute device.
            if (srcLayer && dstLayer)
            {
                BOOST_TEST((srcLayer->GetComputeDevice() == dstLayer->GetComputeDevice()));
            }

            // Mark edge in original graph as observed (by deleting it)
            origEdges.erase(origEdges.begin() + originalEdge);
        }
        else
        {
            // Edge did not exist in the original graph.
            // It must then be an edge connecting a layer and a copy layer.
            const armnn::Layer* srcLayer = edge.first;
            const armnn::Layer* dstLayer = edge.second;

            if (srcLayer == nullptr || dstLayer == nullptr)
            {
                BOOST_ERROR("At least one of the two ends of a new edge (" << edge.first << ", " << edge.second << ") "
                            "introduced after adding copy layers does not correspond to a layer known to the graph");
                continue;
            }

            // One and only one of the two layers referenced by the edge should be present in the original graph.
            const bool srcLayerInOrigGraph = GraphHasNamedLayer(origGraph, edge.first->GetNameStr());
            const bool dstLayerInOrigGraph = GraphHasNamedLayer(origGraph, edge.second->GetNameStr());

            if (srcLayerInOrigGraph == dstLayerInOrigGraph)
            {
                BOOST_ERROR("A new edge ("
                            << edge.first->GetName()
                            << ", "
                            << edge.second->GetName()
                            << ") introduced after adding copy "
                            "layers to a graph is invalid. One of the ends should be present in the original "
                            "graph and the other should not, but "
                            << (srcLayerInOrigGraph ? "both are" : "neither is"));
                continue;
            }

            const armnn::Layer* copyLayer = srcLayerInOrigGraph ? edge.second : edge.first;
            const armnn::Layer* nonCopyLayer = srcLayerInOrigGraph ? srcLayer : dstLayer;

            // Find all edges connecting the copy layer to other layers
            std::vector<Edge> adjEdges;
            auto it = newEdges.begin();
            while (it != newEdges.end())
            {
                Edge& newEdge = *it;
                if (copyLayer == (srcLayerInOrigGraph ? newEdge.first : newEdge.second))
                {
                    adjEdges.push_back(newEdge);

                    // Since the adjacent edge is immediately tested below, no need to consider it afterwards
                    it = newEdges.erase(it);
                }
                else
                {
                    it++;
                }
            }

            if (adjEdges.empty())
            {
                BOOST_ERROR("An edge connecting a layer and a copy layer exists, (" << edge.first << ", " <<
                            edge.second << "), but no other edges connecting the copy layer '" << copyLayer->GetName()
                            << "' to other layers could be found");
                continue;
            }

            // Test adjacent edges now
            for (const Edge& adjEdge : adjEdges)
            {
                // The adjacent edge must connect the copy layer to another layer
                const armnn::Layer* adjLayer = srcLayerInOrigGraph ? adjEdge.second : adjEdge.first;

                if (!adjLayer)
                {
                    BOOST_ERROR("An edge (" << adjEdge.first << ", " << adjEdge.second << ") is adjacent to an edge "
                                "connecting a layer and a copy layer, (" << edge.first << ", " << edge.second << "), "
                                "but its non-copy end does not correspond to a layer");
                    continue;
                }

                // Both layers must have different compute devices
                BOOST_TEST((nonCopyLayer->GetComputeDevice() != adjLayer->GetComputeDevice()));

                // There must exist an edge connecting both layers directly in the original graph
                {
                    const armnn::Layer* origEdgeN1 = srcLayerInOrigGraph ? nonCopyLayer : adjLayer;
                    const armnn::Layer* origEdgeN2 = srcLayerInOrigGraph ? adjLayer : nonCopyLayer;
                    auto origEdgeIter = std::find(origEdges.begin(), origEdges.end(),
                                                  Edge(origEdgeN1, origEdgeN2));

                    if (origEdgeIter != origEdges.end())
                    {
                        origEdges.erase(origEdgeIter);
                    }
                    else
                    {
                        BOOST_ERROR("An edge (" << adjEdge.first << ", " << adjEdge.second << ") is adjacent to an "
                                    "edge connecting a layer and a copy layer, (" << edge.first << ", " << edge.second <<
                                    "), but there is no edge connecting the layers in the original graph");
                    }
                }
            }
        }
    }

    BOOST_TEST(origEdges.empty(), "Not all of the edges in the original graph correspond to paths in the new graph");
}

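// Fixture that builds the following graph, with every layer assigned to Compute::CpuRef:
//
//      input
//        |
//      conv1
//       / \'
//      |  conv2
//       \  /
//      merger
//        |
//       act
//        |
//     softmax
//        |
//      output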
struct CopyLayersFixture
{
    CopyLayersFixture()
    {
        using namespace armnn;
        using namespace std;

        Layer* const inputLayer = AddLayer<InputLayer>(0, "input");
        inputLayer->SetComputeDevice(Compute::CpuRef);

        Convolution2dDescriptor convolutionDefaults;
        Layer* const convLayer1 = AddLayer<Convolution2dLayer>(convolutionDefaults, "conv1");
        convLayer1->SetComputeDevice(Compute::CpuRef);

        inputLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));

        Layer* const convLayer2 = AddLayer<Convolution2dLayer>(convolutionDefaults, "conv2");
        convLayer2->SetComputeDevice(Compute::CpuRef);

        convLayer1->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(0));

        armnn::OriginsDescriptor mergerDefaults(2);
        Layer* const mergerLayer = AddLayer<MergerLayer>(mergerDefaults, "merger");
        mergerLayer->SetComputeDevice(armnn::Compute::CpuRef);

        convLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
        convLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1));

        armnn::ActivationDescriptor activationDefaults;
        Layer* const actLayer = AddLayer<ActivationLayer>(activationDefaults, "act");
        actLayer->SetComputeDevice(armnn::Compute::CpuRef);

        mergerLayer->GetOutputSlot(0).Connect(actLayer->GetInputSlot(0));

        armnn::SoftmaxDescriptor softmaxDefaults;
        Layer* const softmaxLayer = AddLayer<SoftmaxLayer>(softmaxDefaults, "softmax");
        softmaxLayer->SetComputeDevice(armnn::Compute::CpuRef);

        actLayer->GetOutputSlot(0).Connect(softmaxLayer->GetInputSlot(0));

        Layer* const outputLayer = AddLayer<OutputLayer>(0, "output");
        outputLayer->SetComputeDevice(armnn::Compute::CpuRef);

        softmaxLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
    }

    armnn::TensorInfo m_TensorDesc;
    armnn::Graph m_Graph;

private:

    template <typename LayerType, typename... Args>
    LayerType* AddLayer(Args&&... args)
    {
        LayerType* const layer = m_Graph.AddLayer<LayerType>(std::forward<Args>(args)...);

        for (auto slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
        {
            slot->SetTensorInfo(m_TensorDesc);
        }

        return layer;
    }
};

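// With every layer in the fixture on the same backend, AddCopyLayers() is expected to leave the graph's
// connectivity equivalent to the original.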
BOOST_FIXTURE_TEST_CASE(AddCopyLayers, CopyLayersFixture)
{
    const armnn::Graph origGraph(m_Graph);
    m_Graph.AddCopyLayers();

    TestGraphAfterAddingCopyLayers(m_Graph, origGraph);
}

BOOST_FIXTURE_TEST_CASE(AddCopyLayersSeveralTimes, CopyLayersFixture)
{
    m_Graph.AddCopyLayers();

    // Calling AddCopyLayers() several times should not change the connections
    const std::vector<Edge> edges = GetEdgeList(m_Graph);
    for (int i = 0; i < 4; ++i)
    {
        m_Graph.AddCopyLayers();
        const std::vector<Edge> otherEdges = GetEdgeList(m_Graph);
        BOOST_TEST((edges == otherEdges));
    }
}

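// When AddCopyLayers() inserts more than one copy layer between the same pair of layers, each copy layer must
// get a distinct name; otherwise the resulting edge list would contain duplicates.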
BOOST_AUTO_TEST_CASE(CopyLayersAddedBetweenSameLayersHaveDifferentNames)
{
    armnn::Graph graph;

    armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
    inputLayer->SetComputeDevice(armnn::Compute::CpuRef);

    armnn::ViewsDescriptor splitterDesc(2);
    armnn::SplitterLayer* const splitterLayer = graph.AddLayer<armnn::SplitterLayer>(splitterDesc, "splitter");
    splitterLayer->SetComputeDevice(armnn::Compute::GpuAcc);

    armnn::AdditionLayer* const additionLayer = graph.AddLayer<armnn::AdditionLayer>("addition");
    additionLayer->SetComputeDevice(armnn::Compute::CpuRef);

    armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
    outputLayer->SetComputeDevice(armnn::Compute::CpuRef);

    inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
    splitterLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
    splitterLayer->GetOutputSlot(1).Connect(additionLayer->GetInputSlot(1));
    additionLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

472
473 std::vector<Edge> edges = GetEdgeList(graph);
474 BOOST_CHECK(edges.size() == 7u);
475 std::sort(edges.begin(), edges.end());
476 auto last = std::unique(edges.begin(), edges.end());
477 BOOST_CHECK_MESSAGE(last == edges.end(), "Found duplicated edges after AddCopyLayers()");
478}
479
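// Layer names are not required to be unique; the graph must still build and sort correctly when two layers
// share the same name.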
BOOST_AUTO_TEST_CASE(DuplicateLayerNames)
{
    armnn::Graph graph;

    armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "layer");
    inputLayer->SetComputeDevice(armnn::Compute::CpuRef);

    armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "layer");
    outputLayer->SetComputeDevice(armnn::Compute::CpuRef);

    inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

    auto it = graph.TopologicalSort().begin();
    BOOST_TEST(((*it)->GetType() == armnn::LayerType::Input));
    BOOST_TEST(((*std::next(it))->GetType() == armnn::LayerType::Output));
}

BOOST_AUTO_TEST_SUITE_END()