//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
Aron Virginas-Tar56055192018-11-12 18:10:43 +00006#include "ClWorkloadFactoryHelper.hpp"
7
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00008#include <Network.hpp>
Aron Virginas-Tar70104002018-10-24 15:33:28 +01009
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000010#include <test/GraphUtils.hpp>
Aron Virginas-Tar70104002018-10-24 15:33:28 +010011
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <cl/ClWorkloadFactory.hpp>
Aron Virginas-Tar70104002018-10-24 15:33:28 +010013
14#include <boost/test/unit_test.hpp>
15
16BOOST_AUTO_TEST_SUITE(ClOptimizedNetwork)
17
18BOOST_AUTO_TEST_CASE(OptimizeValidateGpuDeviceSupportLayerNoFallback)
19{
20 // build up the structure of the network
21 armnn::INetworkPtr net(armnn::INetwork::Create());
22
23 armnn::IConnectableLayer* input = net->AddInputLayer(0);
24 armnn::IConnectableLayer* output = net->AddOutputLayer(0);
25
26 input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
27 input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
28
29 armnn::IRuntime::CreationOptions options;
30 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
31
32 std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
33 armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
34 BOOST_CHECK(optNet);
35 // validate workloads
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000036 armnn::ClWorkloadFactory fact =
37 ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
Aron Virginas-Tar70104002018-10-24 15:33:28 +010038 for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
39 {
40 BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
41 BOOST_CHECK_NO_THROW(
Derek Lamberti94a88d22019-12-10 21:12:59 +000042 layer->CreateWorkload(fact));
Aron Virginas-Tar70104002018-10-24 15:33:28 +010043 }
44}
45
46BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc)
47{
48 // Test to check when Fp16 Turbo mode set
49 // it converts the Fp32 network to Fp16 Network
50 // add Fp32ToFp16 conversion layer after the InputLayer
51 // add Fp16ToFp32 conversion layer after the OutputLayer
52 // checks the other layers if they are supported in Fp16
53 // if they are not put the conversion layers before and after
54 // if they are not supported in Fp16 use Fp32 instead
55 // if there are inverse conversion layers remove them with optimization
56 // at the moment FloorLayer is not supported in Fp16 so it rolls back to Fp32
57 // and inverse conversion layers are removed by the optimizer
58 armnn::Network net;
59
60 // Defines layers.
61 auto input = net.AddInputLayer(0, "input layer");
62 // ReLu1
63 armnn::ActivationDescriptor activation1Descriptor;
64 activation1Descriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
65 activation1Descriptor.m_A = 1.f;
66 activation1Descriptor.m_B = -1.f;
67 auto activation = net.AddActivationLayer(activation1Descriptor, "activation layer");
68 auto output = net.AddOutputLayer(0, "output layer");
69
70 // Connects layers.
71 input->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
72 activation->GetOutputSlot(0).Connect(output->GetInputSlot(0));
73
74 armnn::TensorShape shape({4});
75 armnn::TensorInfo info(shape, armnn::DataType::Float32);
76 input->GetOutputSlot(0).SetTensorInfo(info);
77 activation->GetOutputSlot(0).SetTensorInfo(info);
78
79 armnn::IRuntime::CreationOptions options;
80 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
81
82 std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
83
84 armnn::OptimizerOptions optimizerOptions;
85 optimizerOptions.m_ReduceFp32ToFp16 = true;
86
87 armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
88 net, backends, runtime->GetDeviceSpec(), optimizerOptions);
89
90 const armnn::Graph& graph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
91
92 // Tests that all layers are present in the graph.
93 BOOST_TEST(graph.GetNumLayers() == 5);
94
95 // Tests that the vertices exist and have correct names.
96 BOOST_TEST(GraphHasNamedLayer(graph, "input layer"));
97 BOOST_TEST(GraphHasNamedLayer(graph, "convert_fp32_to_fp16-0-input layer"));
98 BOOST_TEST(GraphHasNamedLayer(graph, "activation layer"));
99 BOOST_TEST(GraphHasNamedLayer(graph, "convert_fp16_to_fp32-0-output layer"));
100 BOOST_TEST(GraphHasNamedLayer(graph, "output layer"));
101}
102
Sadik Armagan045f6be2020-09-10 13:37:32 +0100103BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnGpuAcc)
104{
105 armnn::INetworkPtr net(armnn::INetwork::Create());
106
107 armnn::IConnectableLayer* input = net->AddInputLayer(0);
108 armnn::IConnectableLayer* output = net->AddOutputLayer(0);
109
110 input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
111 input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
112
113 armnn::IRuntime::CreationOptions options;
114 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
115
116 std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
117 armnn::OptimizerOptions optimizerOptions;
118 armnn::BackendOptions modelOptions("GpuAcc", {{"FastMathEnabled", true}});
119 optimizerOptions.m_ModelOptions.push_back(modelOptions);
120
121 armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
122 *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
123
124 BOOST_CHECK(optimizedNet);
125
126 auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
127
128 BOOST_TEST(modelOptionsOut.size() == 1);
129 BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
130 BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
131}
132
Matthew Bentham39ef3e52020-01-20 10:09:09 +0000133BOOST_AUTO_TEST_SUITE_END();