//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "Graph.hpp"

#include <boost/core/ignore_unused.hpp>

namespace
{
armnn::Graph dummyGraph;

// Make a dummy TensorInfo object.
template<armnn::DataType DataType>
armnn::TensorInfo MakeDummyTensorInfo()
{
    return armnn::TensorInfo({2,2,2,2}, DataType);
}

// Make a dummy WorkloadInfo using a dummy TensorInfo.
template<armnn::DataType DataType>
armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
{
    armnn::WorkloadInfo info;
    for (unsigned int i = 0; i < numInputs; i++)
    {
        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }
    for (unsigned int o = 0; o < numOutputs; o++)
    {
        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }
    return info;
}

// Template class to create a dummy layer (2 parameters: descriptor and name).
template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
struct DummyLayer
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    LayerType* m_Layer;
};

// Template class to create a dummy layer (1 parameter: name).
template<typename LayerType>
struct DummyLayer<LayerType, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>("");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    LayerType* m_Layer;
};
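// BatchNormalization needs its constant tensors (mean, variance, beta and gamma) populated,
// so that creating a dummy workload from this layer has valid data to work with.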
template<>
struct DummyLayer<armnn::BatchNormalizationLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
        m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::BatchNormalizationLayer* m_Layer;
};
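// ConstantLayer's constructor takes only a name, hence the explicit one-parameter form.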
template<>
struct DummyLayer<armnn::ConstantLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::ConstantLayer* m_Layer;
};
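// InputLayer is constructed from a LayerBindingId rather than a descriptor.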
template<>
struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::InputLayer* m_Layer;
};
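// MergerLayer needs an OriginsDescriptor giving the number of inputs it merges (two here).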
template<>
struct DummyLayer<armnn::MergerLayer>
{
    DummyLayer()
    {
        armnn::OriginsDescriptor desc(2);
        m_Layer = dummyGraph.AddLayer<armnn::MergerLayer>(desc, "");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::MergerLayer* m_Layer;
};
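// OutputLayer, like InputLayer, is constructed from a LayerBindingId.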
template<>
struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::OutputLayer* m_Layer;
};
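// SplitterLayer needs a ViewsDescriptor giving the number of views it splits into (one here).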
template<>
struct DummyLayer<armnn::SplitterLayer>
{
    DummyLayer()
    {
        armnn::ViewsDescriptor desc(1);
        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::SplitterLayer* m_Layer;
};
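// Common base for the convolution layer dummies, which additionally need dummy weight and
// bias tensor handles before a workload can be created from them.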
template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
    DummyConvolutionLayer()
    {
        typename ConvolutionLayerType::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }
    ~DummyConvolutionLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    ConvolutionLayerType* m_Layer;
};

template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};
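// The LSTM dummy disables CIFG, so both the basic and the CIFG parameter sets are populated.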
template <typename LstmLayerType>
struct DummyLstmLayer
{
    DummyLstmLayer()
    {
        typename LstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        m_Layer = dummyGraph.AddLayer<LstmLayerType>(desc, "");
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }
    ~DummyLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::LstmLayer* m_Layer;
};

template<>
struct DummyLayer<armnn::LstmLayer>
    : public DummyLstmLayer<armnn::LstmLayer>
{
};
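// FullyConnected needs a dummy weight tensor handle populated.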
template<>
struct DummyLayer<armnn::FullyConnectedLayer>
{
    DummyLayer()
    {
        armnn::FullyConnectedLayer::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }
    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }
    armnn::FullyConnectedLayer* m_Layer;
};

// Tag for giving each LayerType entry its own strong type.
template<armnn::LayerType>
struct Tag{};
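// Declares a LayerTypePolicy specialization that maps a LayerType enum entry to its layer class,
// descriptor type, queue descriptor and name string, plus a factory method for a dummy workload.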
#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};
// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter (name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters (descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)

// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;

// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_1_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_2_PARAM(Merger)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)

// Generic implementation to get the number of input slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}

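// The Merger dummy layer is built with OriginsDescriptor(2), so it is always tested with two inputs.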
template<>
unsigned int GetNumInputs<armnn::LayerType::Merger>(const armnn::Layer& layer)
{
    boost::ignore_unused(layer);
    return 2;
}

// Tests that the IsLayerSupported() function returns the correct value.
// We determine the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
// Returns true if expectations are met, otherwise returns false.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
{
    using LayerPolicy = LayerTypePolicy<Type, DataType>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);

    // Make another dummy layer just to make IsLayerSupported have valid inputs.
    DummyLayer<armnn::ConstantLayer, void> previousLayer;
    // Set the output of the previous layer to a dummy tensor.
    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    // Connect the output of the previous layer to each input of the tested layer.
    for (unsigned int i = 0; i < numIn; i++)
    {
        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
        previousLayerOutputSlot.Connect(layerInputSlot);
    }
    // Set each output of the tested layer to a dummy tensor.
    for (unsigned int i = 0; i < numOut; i++)
    {
        layer.m_Layer->GetOutputSlot(i).SetTensorInfo(output);
    }

    std::string layerName = LayerPolicy::NameStr;
    std::string reasonIfUnsupported;
    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
    {
        std::string errorMsg = " layer expected support but found none.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
            // Temporary workaround (to be replaced): Lstm is only supported for Float32 at the moment,
            // so the check below is disabled.
            // BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const armnn::InvalidArgumentException& e)
        {
            boost::ignore_unused(e);
            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
    else
    {
        std::string errorMsg = " layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        // These two exceptions are ok: for workloads that are partially supported, attempting to instantiate them
        // using parameters that make IsLayerSupported() return false should throw an
        // InvalidArgumentException or an UnimplementedException.
        catch(const armnn::InvalidArgumentException& e)
        {
            boost::ignore_unused(e);
            return true;
        }
        catch(const armnn::UnimplementedException& e)
        {
            boost::ignore_unused(e);
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
}

// Helper function to compute the next type in the LayerType enum.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type) + 1);
}

// Termination function for the recursion over the LayerType enum: tests the last entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}

// Recursive function to test one entry in the LayerType enum and then iterate to the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
        IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
            (factory, Tag<NextType(Type)>());
}

// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}
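
// Example call from a backend's unit tests (a sketch; RefWorkloadFactory is one possible FactoryType):
//     armnn::RefWorkloadFactory factory;
//     BOOST_CHECK(IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory));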
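
// Checks that a layer object created for the given LayerType reports that same type back.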
template<armnn::LayerType Type>
bool TestLayerTypeMatches()
{
    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    std::stringstream ss;
    ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
    bool v = Type == layer.m_Layer->GetType();
    BOOST_CHECK_MESSAGE(v, ss.str());
    return v;
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
        LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}

bool LayerTypeMatchesTest()
{
    return LayerTypeMatchesTestImpl<armnn::LayerType::FirstLayer>(Tag<armnn::LayerType::FirstLayer>());
}

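// Checks IsLayerSupported for a conversion layer (e.g. ConvertFp16ToFp32) wired between an
// input layer and an output layer, where the input and output data types differ.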
template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

} // namespace