//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <backendsCommon/test/EndToEndTestImpl.hpp>

#include <boost/test/unit_test.hpp>

BOOST_AUTO_TEST_SUITE(RefEndToEnd)

BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32)
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
    BOOST_TEST(ConstantUsageFloat32Test(backends));
}

BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8)
{
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
    BOOST_TEST(ConstantUsageUint8Test(backends));
}

BOOST_AUTO_TEST_CASE(Unsigned8)
{
    using namespace armnn;

    // Create runtime in which test will run
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Builds up the structure of the network.
    armnn::INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0, "input");
    IConnectableLayer* softmax = net->AddSoftmaxLayer(SoftmaxDescriptor(), "softmax");
    IConnectableLayer* output = net->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationOffset(100);
    inputTensorInfo.SetQuantizationScale(10000.0f);
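    // ArmNN's asymmetric quantization maps quantized -> real as real = scale * (quantized - offset),
    // so offset 100 and scale 10000 spread the uint8 inputs below across a very wide real-valued range.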
    input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);

    TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationOffset(0);
    outputTensorInfo.SetQuantizationScale(1.0f / 255.0f);
    softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Optimize the network.
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
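    // CpuRef is the reference backend: a portable C++ implementation intended for verifying
    // correctness rather than performance, which makes it a good target for end-to-end tests.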
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    auto error = runtime->LoadNetwork(netId, std::move(optNet));
    BOOST_TEST(error == Status::Success);

    // Creates structures for input & output.
    std::vector<uint8_t> inputData
    {
        1, 10, 3, 200, 5 // Some inputs - one of which is sufficiently larger than the others to saturate softmax.
    };
    std::vector<uint8_t> outputData(5);

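    // Each pair binds a layer id (as passed to AddInputLayer/AddOutputLayer) to the caller-owned
    // buffer the runtime should read from or write into.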
    armnn::InputTensors inputTensors
    {
        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
    };
    armnn::OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

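    // Why these values: the input 200 dequantizes to (200 - 100) * 10000 = 1e6, which dwarfs the
    // other inputs, so its softmax probability saturates towards 1.0. Re-quantized with scale
    // 1/255 and offset 0 that becomes 255, while the remaining probabilities quantize to 0.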
    // Checks the results.
    BOOST_TEST(outputData[0] == 0);
    BOOST_TEST(outputData[1] == 0);
    BOOST_TEST(outputData[2] == 0);
    BOOST_TEST(outputData[3] == 255); // softmax has been saturated.
    BOOST_TEST(outputData[4] == 0);
}

BOOST_AUTO_TEST_CASE(TrivialAdd)
{
    // This test was designed to match "AddTwo" in android nn/runtime/test/TestTrivialModel.cpp.

    using namespace armnn;

    // Create runtime in which test will run
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Builds up the structure of the network.
    armnn::INetworkPtr net(INetwork::Create());

    IConnectableLayer* input1 = net->AddInputLayer(0);
    IConnectableLayer* input2 = net->AddInputLayer(1);
    IConnectableLayer* add = net->AddAdditionLayer();
    IConnectableLayer* output = net->AddOutputLayer(0);

    input1->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input2->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo tensorInfo(TensorShape({3, 4}), DataType::Float32);
    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    input2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    add->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // Optimize the network.
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Loads it into the runtime and checks that the load succeeded.
    NetworkId netId;
    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);

    // Creates structures for input & output - matching android nn test.
    std::vector<float> input1Data
    {
        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f
    };
    std::vector<float> input2Data
    {
        100.f, 200.f, 300.f, 400.f, 500.f, 600.f, 700.f, 800.f, 900.f, 1000.f, 1100.f, 1200.f
    };
    std::vector<float> outputData(12);

    InputTensors inputTensors
    {
        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
        {1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), input2Data.data())}
    };
    OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

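    // Addition is element-wise over the two {3, 4} tensors, so every expected value below is
    // simply input1Data[i] + input2Data[i].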
    // Checks the results.
    BOOST_TEST(outputData[0] == 101);
    BOOST_TEST(outputData[1] == 202);
    BOOST_TEST(outputData[2] == 303);
    BOOST_TEST(outputData[3] == 404);
    BOOST_TEST(outputData[4] == 505);
    BOOST_TEST(outputData[5] == 606);
    BOOST_TEST(outputData[6] == 707);
    BOOST_TEST(outputData[7] == 808);
    BOOST_TEST(outputData[8] == 909);
    BOOST_TEST(outputData[9] == 1010);
    BOOST_TEST(outputData[10] == 1111);
    BOOST_TEST(outputData[11] == 1212);
}

BOOST_AUTO_TEST_CASE(MultipleOutputs)
{
    using namespace armnn;

    // Create runtime in which test will run
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Builds up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0);

    // ReLu1
    ActivationDescriptor activation1Descriptor;
    activation1Descriptor.m_Function = ActivationFunction::BoundedReLu;
    activation1Descriptor.m_A = 1.f;
    activation1Descriptor.m_B = -1.f;
    IConnectableLayer* activation1 = net->AddActivationLayer(activation1Descriptor);

    // ReLu6
    ActivationDescriptor activation2Descriptor;
    activation2Descriptor.m_Function = ActivationFunction::BoundedReLu;
    activation2Descriptor.m_A = 6.0f;
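    // m_B is not set here; in ArmNN's ActivationDescriptor it defaults to 0.0f, giving the
    // usual ReLu6 clamp to [0, 6] - consistent with the expected outputs checked below.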
    IConnectableLayer* activation2 = net->AddActivationLayer(activation2Descriptor);

    // BoundedReLu(min=2, max=5)
    ActivationDescriptor activation3Descriptor;
    activation3Descriptor.m_Function = ActivationFunction::BoundedReLu;
    activation3Descriptor.m_A = 5.0f;
    activation3Descriptor.m_B = 2.0f;
    IConnectableLayer* activation3 = net->AddActivationLayer(activation3Descriptor);

    IConnectableLayer* output1 = net->AddOutputLayer(0);
    IConnectableLayer* output2 = net->AddOutputLayer(1);
    IConnectableLayer* output3 = net->AddOutputLayer(2);

    input->GetOutputSlot(0).Connect(activation1->GetInputSlot(0));
    input->GetOutputSlot(0).Connect(activation2->GetInputSlot(0));
    input->GetOutputSlot(0).Connect(activation3->GetInputSlot(0));

    activation1->GetOutputSlot(0).Connect(output1->GetInputSlot(0));
    activation2->GetOutputSlot(0).Connect(output2->GetInputSlot(0));
    activation3->GetOutputSlot(0).Connect(output3->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo tensorInfo(TensorShape({ 10 }), DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    activation1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    activation2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    activation3->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // Optimize the network.
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Loads it into the runtime and checks that the load succeeded.
    NetworkId netId;
    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);

    // Creates structures for input & output.
    const std::vector<float> inputData{ 3.f, 5.f, 2.f, 3.f, 7.f, 0.f, -2.f, -1.f, 3.f, 3.f };

    std::vector<float> output1Data(inputData.size());
    std::vector<float> output2Data(inputData.size());
    std::vector<float> output3Data(inputData.size());

    InputTensors inputTensors
    {
        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
    };
    OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), output1Data.data())},
        {1, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 1), output2Data.data())},
        {2, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 2), output3Data.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

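    // BoundedReLu computes min(m_A, max(m_B, x)) per element, so each branch should return the
    // shared input clamped to its own configured range.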
    // Checks the results.
    BOOST_TEST(output1Data == std::vector<float>({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f })); // ReLu1
    BOOST_TEST(output2Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f })); // ReLu6
    BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
}

BOOST_AUTO_TEST_SUITE_END()