blob: 3bf1eb8caafd02c76ff23221b49fdacf6a9d8d30 [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/ArmNN.hpp>
#include <Graph.hpp>
#include <Network.hpp>

#include <neon/NeonWorkloadFactory.hpp>

#include <boost/test/unit_test.hpp>

BOOST_AUTO_TEST_SUITE(NeonOptimizedNetwork)

16BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
17{
18 // build up the structure of the network
19 armnn::INetworkPtr net(armnn::INetwork::Create());
20
21 armnn::IConnectableLayer* input = net->AddInputLayer(0);
22 armnn::IConnectableLayer* output = net->AddOutputLayer(0);
23
24 input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
25 input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
26
27 armnn::IRuntime::CreationOptions options;
28 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
29
30 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
31 armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
32 BOOST_CHECK(optNet);
33 // validate workloads
34 armnn::NeonWorkloadFactory fact;
35 for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
36 {
37 BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
38 BOOST_CHECK_NO_THROW(
39 layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
40 }
41}
42
43BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
44{
45 // build up the structure of the network
46 armnn::INetworkPtr net(armnn::INetwork::Create());
47
48 armnn::IConnectableLayer* input = net->AddInputLayer(0);
49
50 // This layer configuration isn't supported by CpuAcc and isn't allowed to fall back, so Optimize will return null.
51 armnn::NormalizationDescriptor descriptor;
52 armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
53
54 armnn::IConnectableLayer* output = net->AddOutputLayer(0);
55
56 input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
57 normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
58
59 input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
60 normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
61
62 armnn::IRuntime::CreationOptions options;
63 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
64
65 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
66 armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
67 BOOST_CHECK(!optNet);
68}
69
BOOST_AUTO_TEST_SUITE_END()