//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <Graph.hpp>

#include <backendsCommon/WorkloadFactory.hpp>

#include <boost/core/ignore_unused.hpp>

namespace
{
armnn::Graph dummyGraph;

// Make a dummy TensorInfo object.
template<armnn::DataType DataType>
armnn::TensorInfo MakeDummyTensorInfo()
{
    return armnn::TensorInfo({2,2,2,2}, DataType);
}

// Make a dummy WorkloadInfo using a dummy TensorInfo.
template<armnn::DataType DataType>
armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
{
    armnn::WorkloadInfo info;
    for (unsigned int i=0; i < numInputs; i++)
    {
        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }
    for (unsigned int o=0; o < numOutputs; o++)
    {
        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }
    return info;
}

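// For example (illustrative only), MakeDummyWorkloadInfo<armnn::DataType::Float32>(2, 1)
// returns a WorkloadInfo holding two 2x2x2x2 Float32 input TensorInfos and one such output.
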
// Template class to create a dummy layer (2 parameters).
template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
struct DummyLayer
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    LayerType* m_Layer;
};

// Template class to create a dummy layer (1 parameter).
template<typename LayerType>
struct DummyLayer<LayerType, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>("");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    LayerType* m_Layer;
};

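// Illustrative sketch only (an addition for documentation, not called by the tests):
// DummyLayer is an RAII helper, so constructing one adds the layer to dummyGraph and
// destroying it erases the layer again.
inline void DummyLayerUsageSketch()
{
    // Two-parameter form: the layer constructor takes (descriptor, name).
    DummyLayer<armnn::ActivationLayer> activation;
    // One-parameter form: the layer constructor takes only a name.
    DummyLayer<armnn::AdditionLayer, void> addition;
    boost::ignore_unused(activation, addition);
}   // Both layers are erased from dummyGraph here.
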
template<>
struct DummyLayer<armnn::BatchNormalizationLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
        m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::BatchNormalizationLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::ConstantLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::ConstantLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::InputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::MergerLayer>
{
    DummyLayer()
    {
        armnn::OriginsDescriptor desc(2);
        m_Layer = dummyGraph.AddLayer<armnn::MergerLayer>(desc, "");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::MergerLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::OutputLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::SplitterLayer>
{
    DummyLayer()
    {
        armnn::ViewsDescriptor desc(1);
        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::SplitterLayer* m_Layer;
};

template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
    DummyConvolutionLayer()
    {
        typename ConvolutionLayerType::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }
    ~DummyConvolutionLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    ConvolutionLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};

template <typename LstmLayerType>
struct DummyLstmLayer
{
    DummyLstmLayer()
    {
        typename LstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false; // Input-gate (CIFG) parameters are set below, so disable CIFG.

        m_Layer = dummyGraph.AddLayer<LstmLayerType>(desc, "");
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }
    ~DummyLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::LstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::LstmLayer>
    : public DummyLstmLayer<armnn::LstmLayer>
{
};

template<>
struct DummyLayer<armnn::FullyConnectedLayer>
{
    DummyLayer()
    {
        armnn::FullyConnectedLayer::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::FullyConnectedLayer* m_Layer;
};

// Tag for giving each LayerType entry its own unique strong type.
template<armnn::LayerType>
struct Tag{};

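// Because Tag<armnn::LayerType::Activation> and Tag<armnn::LayerType::LastLayer> are
// distinct types, the recursive test drivers below can terminate their walk over the
// enum via overload resolution instead of a runtime check.
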
#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
                                                               unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter (name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters (descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)

// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;

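// For reference, DECLARE_LAYER_POLICY_2_PARAM(Activation) expands to (roughly):
//
//     template<armnn::DataType DataType>
//     struct LayerTypePolicy<armnn::LayerType::Activation, DataType>
//     {
//         using Type = armnn::ActivationLayer;
//         using Desc = armnn::ActivationDescriptor;
//         using QueueDesc = armnn::ActivationQueueDescriptor;
//         constexpr static const char* NameStr = "Activation";
//
//         static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory,
//                                                                    unsigned int nIn, unsigned int nOut)
//         {
//             armnn::ActivationQueueDescriptor desc;
//             armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut);
//             return factory->CreateActivation(desc, info);
//         }
//     };
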
// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_2_PARAM(Merger)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)

// Generic implementation to get the number of input slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}

// The dummy Merger layer is created with two inputs, so override the generic count.
template<>
unsigned int GetNumInputs<armnn::LayerType::Merger>(const armnn::Layer& layer)
{
    boost::ignore_unused(layer);
    return 2;
}

// Tests that the IsLayerSupported() function returns the correct value.
// The expected value is determined by *trying* to create the relevant workload and
// checking whether the outcome matches what IsLayerSupported() reported.
// Returns true if expectations are met, otherwise returns false.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
{
    using LayerPolicy = LayerTypePolicy<Type, DataType>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);

    // Make another dummy layer just to make IsLayerSupported have valid inputs.
    DummyLayer<armnn::ConstantLayer, void> previousLayer;
    // Set the output of the previous layer to a dummy tensor.
    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    // Connect the output of the previous layer to each input of the tested layer.
    for (unsigned int i = 0; i < numIn; i++)
    {
        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
        previousLayerOutputSlot.Connect(layerInputSlot);
    }
    // Set each output of the tested layer to a dummy tensor.
    for (unsigned int i = 0; i < numOut; i++)
    {
        layer.m_Layer->GetOutputSlot(i).SetTensorInfo(output);
    }

    std::string layerName = LayerPolicy::NameStr;
    std::string reasonIfUnsupported;
    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
    {
        std::string errorMsg = " layer expected support but found none.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const armnn::InvalidArgumentException& e)
        {
            boost::ignore_unused(e);
            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
    else
    {
        std::string errorMsg = " layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
        // using parameters that make IsLayerSupported() return false should throw an
        // InvalidArgumentException or UnimplementedException.
        catch(const armnn::InvalidArgumentException& e)
        {
            boost::ignore_unused(e);
            return true;
        }
        catch(const armnn::UnimplementedException& e)
        {
            boost::ignore_unused(e);
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
}

// Helper function to compute the next type in the LayerType enum.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
}
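// For example, if Addition immediately follows Activation in the enum, then
// NextType(armnn::LayerType::Activation) evaluates to armnn::LayerType::Addition.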

// Termination function for determining the end of the LayerType enumeration.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}

// Recursive function to test an entry in the LayerType enum and then recurse on the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
        IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
            (factory, Tag<NextType(Type)>());
}

// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}
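
// A backend's unit tests can then drive the whole enum walk with something like the
// following (the factory type here is an assumption for illustration; any
// IWorkloadFactory implementation works):
//
//     armnn::RefWorkloadFactory factory;
//     BOOST_CHECK(IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory));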

// Checks that the layer created for a given LayerType enum entry reports that same
// value from GetType().
template<armnn::LayerType Type>
bool TestLayerTypeMatches()
{
    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    std::stringstream ss;
    ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
    bool v = Type == layer.m_Layer->GetType();
    BOOST_CHECK_MESSAGE(v, ss.str());
    return v;
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
        LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}
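
// The full check over every layer type is then started with:
//
//     LayerTypeMatchesTestImpl<armnn::LayerType::FirstLayer>(Tag<armnn::LayerType::FirstLayer>());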

// Builds a minimal graph (input -> convert layer -> output) and queries
// FactoryType::IsLayerSupported() for the convert layer with the given
// input and output data types.
template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}
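
// Hypothetical call for the Fp16 -> Fp32 conversion layer (the factory type is an
// assumption for illustration):
//
//     std::string reason;
//     bool ok = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory,
//                                            armnn::ConvertFp16ToFp32Layer,
//                                            armnn::DataType::Float16,
//                                            armnn::DataType::Float32>(reason);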

} // anonymous namespace