//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <backendsCommon/test/EndToEndTestImpl.hpp>

#include <backendsCommon/test/ArithmeticTestImpl.hpp>
#include <backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp>
#include <backendsCommon/test/ConcatTestImpl.hpp>
#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
#include <backendsCommon/test/DetectionPostProcessTestImpl.hpp>
#include <backendsCommon/test/GatherEndToEndTestImpl.hpp>
#include <backendsCommon/test/ResizeEndToEndTestImpl.hpp>
#include <backendsCommon/test/SpaceToDepthEndToEndTestImpl.hpp>
#include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
#include <backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp>

#include <boost/test/unit_test.hpp>
#include <boost/test/execution_monitor.hpp>

BOOST_AUTO_TEST_SUITE(RefEndToEnd)

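// All test cases in this suite run against the CpuRef reference backend.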
std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuRef};

BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32)
{
    BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
}

BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8)
{
    BOOST_TEST(ConstantUsageUint8Test(defaultBackends));
}

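// End-to-end test of a quantised (QAsymm8) softmax network; checks that the dominant input saturates the output.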
BOOST_AUTO_TEST_CASE(Unsigned8)
{
    using namespace armnn;

    // Create runtime in which test will run
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Builds up the structure of the network.
    armnn::INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0, "input");
    IConnectableLayer* softmax = net->AddSoftmaxLayer(SoftmaxDescriptor(), "softmax");
    IConnectableLayer* output = net->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationOffset(100);
    inputTensorInfo.SetQuantizationScale(10000.0f);
    input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);

    TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationOffset(0);
    outputTensorInfo.SetQuantizationScale(1.0f/255.0f);
    softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    auto error = runtime->LoadNetwork(netId, std::move(optNet));
    BOOST_TEST(error == Status::Success);

    // Creates structures for input & output.
    std::vector<uint8_t> inputData
    {
        1, 10, 3, 200, 5 // Some inputs - one of which is sufficiently larger than the others to saturate softmax.
    };
    std::vector<uint8_t> outputData(5);

    armnn::InputTensors inputTensors
    {
        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
    };
    armnn::OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results.
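    // With the output quantisation scale of 1/255 and offset 0, a softmax value of ~1.0 quantises to 255 while the
    // remaining near-zero values quantise to 0 (affine quantisation: q = round(v / scale) + offset).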
    BOOST_TEST(outputData[0] == 0);
    BOOST_TEST(outputData[1] == 0);
    BOOST_TEST(outputData[2] == 0);
    BOOST_TEST(outputData[3] == 255); // softmax has been saturated.
    BOOST_TEST(outputData[4] == 0);
}

BOOST_AUTO_TEST_CASE(TrivialAdd)
{
    // This test was designed to match "AddTwo" in android nn/runtime/test/TestTrivialModel.cpp.

    using namespace armnn;

    // Create runtime in which test will run
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Builds up the structure of the network.
    armnn::INetworkPtr net(INetwork::Create());

    IConnectableLayer* input1 = net->AddInputLayer(0);
    IConnectableLayer* input2 = net->AddInputLayer(1);
    IConnectableLayer* add = net->AddAdditionLayer();
    IConnectableLayer* output = net->AddOutputLayer(0);

    input1->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input2->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo tensorInfo(TensorShape({3, 4}), DataType::Float32);
    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    input2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    add->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Creates structures for input & output - matching android nn test.
    std::vector<float> input1Data
    {
        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f
    };
    std::vector<float> input2Data
    {
        100.f, 200.f, 300.f, 400.f, 500.f, 600.f, 700.f, 800.f, 900.f, 1000.f, 1100.f, 1200.f
    };
    std::vector<float> outputData(12);

    InputTensors inputTensors
    {
        {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
        {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), input2Data.data())}
    };
    OutputTensors outputTensors
    {
        {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results
    BOOST_TEST(outputData[0] == 101);
    BOOST_TEST(outputData[1] == 202);
    BOOST_TEST(outputData[2] == 303);
    BOOST_TEST(outputData[3] == 404);
    BOOST_TEST(outputData[4] == 505);
    BOOST_TEST(outputData[5] == 606);
    BOOST_TEST(outputData[6] == 707);
    BOOST_TEST(outputData[7] == 808);
    BOOST_TEST(outputData[8] == 909);
    BOOST_TEST(outputData[9] == 1010);
    BOOST_TEST(outputData[10] == 1111);
    BOOST_TEST(outputData[11] == 1212);
}

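// Checks that a single input can feed several layers and that each of the three outputs is produced independently.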
BOOST_AUTO_TEST_CASE(MultipleOutputs)
{
    using namespace armnn;

    // Create runtime in which test will run
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Builds up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0);

    // ReLu1
    ActivationDescriptor activation1Descriptor;
    activation1Descriptor.m_Function = ActivationFunction::BoundedReLu;
    activation1Descriptor.m_A = 1.f;
    activation1Descriptor.m_B = -1.f;
    IConnectableLayer* activation1 = net->AddActivationLayer(activation1Descriptor);

    // ReLu6
    ActivationDescriptor activation2Descriptor;
    activation2Descriptor.m_Function = ActivationFunction::BoundedReLu;
    activation2Descriptor.m_A = 6.0f;
    IConnectableLayer* activation2 = net->AddActivationLayer(activation2Descriptor);

    // BoundedReLu(min=2, max=5)
    ActivationDescriptor activation3Descriptor;
    activation3Descriptor.m_Function = ActivationFunction::BoundedReLu;
    activation3Descriptor.m_A = 5.0f;
    activation3Descriptor.m_B = 2.0f;
    IConnectableLayer* activation3 = net->AddActivationLayer(activation3Descriptor);

    IConnectableLayer* output1 = net->AddOutputLayer(0);
    IConnectableLayer* output2 = net->AddOutputLayer(1);
    IConnectableLayer* output3 = net->AddOutputLayer(2);

    input->GetOutputSlot(0).Connect(activation1->GetInputSlot(0));
    input->GetOutputSlot(0).Connect(activation2->GetInputSlot(0));
    input->GetOutputSlot(0).Connect(activation3->GetInputSlot(0));

    activation1->GetOutputSlot(0).Connect(output1->GetInputSlot(0));
    activation2->GetOutputSlot(0).Connect(output2->GetInputSlot(0));
    activation3->GetOutputSlot(0).Connect(output3->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo tensorInfo(TensorShape({ 10 }), DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    activation1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    activation2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    activation3->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Creates structures for input & output.
    const std::vector<float> inputData{ 3.f, 5.f, 2.f, 3.f, 7.f, 0.f, -2.f, -1.f, 3.f, 3.f };

    std::vector<float> output1Data(inputData.size());
    std::vector<float> output2Data(inputData.size());
    std::vector<float> output3Data(inputData.size());

    InputTensors inputTensors
    {
        {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
    };
    OutputTensors outputTensors
    {
        {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), output1Data.data())},
        {1,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 1), output2Data.data())},
        {2,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 2), output3Data.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results.
    BOOST_TEST(output1Data == std::vector<float>({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f })); // ReLu1
    BOOST_TEST(output2Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f })); // ReLu6
    BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
}

BOOST_AUTO_TEST_CASE(TrivialMin)
{
    using namespace armnn;

    // Create runtime in which test will run
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Builds up the structure of the network.
    armnn::INetworkPtr net(INetwork::Create());

    IConnectableLayer* input1 = net->AddInputLayer(0);
    IConnectableLayer* input2 = net->AddInputLayer(1);
    IConnectableLayer* min = net->AddMinimumLayer();
    IConnectableLayer* output = net->AddOutputLayer(0);

    input1->GetOutputSlot(0).Connect(min->GetInputSlot(0));
    input2->GetOutputSlot(0).Connect(min->GetInputSlot(1));
    min->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo tensorInfo(TensorShape({1, 1, 1, 4}), DataType::Float32);
    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    input2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    min->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Creates structures for input & output - matching android nn test.
    std::vector<float> input1Data
    {
        1.0f, 2.0f, 3.0f, 4.0f
    };
    std::vector<float> input2Data
    {
        2.0f, 1.0f, 5.0f, 2.0f
    };
    std::vector<float> outputData(4);

    InputTensors inputTensors
    {
        {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
        {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), input2Data.data())}
    };
    OutputTensors outputTensors
    {
        {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results
    BOOST_TEST(outputData[0] == 1);
    BOOST_TEST(outputData[1] == 1);
    BOOST_TEST(outputData[2] == 3);
    BOOST_TEST(outputData[3] == 2);
}

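// Comparison (Equal/Greater) end-to-end tests: the Boolean results are checked as 0/1 values held in uint8 buffers.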
BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndTest)
{
    const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
                                                0, 0, 0, 0, 1, 1, 1, 1 });

    ArithmeticSimpleEndToEnd<armnn::DataType::Float32, armnn::DataType::Boolean>(defaultBackends,
                                                                                 LayerType::Equal,
                                                                                 expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndTest)
{
    const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
                                                0, 0, 0, 0, 0, 0, 0, 0 });

    ArithmeticSimpleEndToEnd<armnn::DataType::Float32, armnn::DataType::Boolean>(defaultBackends,
                                                                                 LayerType::Greater,
                                                                                 expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test)
{
    const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
                                                0, 0, 0, 0, 1, 1, 1, 1 });

    ArithmeticSimpleEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Boolean>(defaultBackends,
                                                                                         LayerType::Equal,
                                                                                         expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test)
{
    const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
                                                0, 0, 0, 0, 0, 0, 0, 0 });

    ArithmeticSimpleEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Boolean>(defaultBackends,
                                                                                         LayerType::Greater,
                                                                                         expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndTest)
{
    const std::vector<uint8_t> expectedOutput({ 1, 0, 1, 1, 0, 0,
                                                0, 0, 0, 0, 0, 0 });

    ArithmeticBroadcastEndToEnd<armnn::DataType::Float32, armnn::DataType::Boolean>(defaultBackends,
                                                                                    LayerType::Equal,
                                                                                    expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndTest)
{
    const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                1, 1, 1, 1, 1, 1 });

    ArithmeticBroadcastEndToEnd<armnn::DataType::Float32, armnn::DataType::Boolean>(defaultBackends,
                                                                                    LayerType::Greater,
                                                                                    expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test)
{
    const std::vector<uint8_t> expectedOutput({ 1, 0, 1, 1, 0, 0,
                                                0, 0, 0, 0, 0, 0 });

    ArithmeticBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Boolean>(defaultBackends,
                                                                                            LayerType::Equal,
                                                                                            expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test)
{
    const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                1, 1, 1, 1, 1, 1 });

    ArithmeticBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Boolean>(defaultBackends,
                                                                                            LayerType::Greater,
                                                                                            expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NHWCTest)
{
    BatchToSpaceNdEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NHWCTest)
{
    BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NHWCTest)
{
    BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NCHWTest)
{
    BatchToSpaceNdEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NCHWTest)
{
    BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NCHWTest)
{
    BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NHWCTest)
{
    BatchToSpaceNdComplexEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NHWCTest)
{
    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NHWCTest)
{
    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NCHWTest)
{
    BatchToSpaceNdComplexEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NCHWTest)
{
    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NCHWTest)
{
    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Test)
{
    ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Uint8Test)
{
    ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Test)
{
    ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Uint8Test)
{
    ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Test)
{
    ConcatDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Uint8Test)
{
    ConcatDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Test)
{
    ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Uint8Test)
{
    ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefGatherFloatTest)
{
    GatherEndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefGatherUint8Test)
{
    GatherEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefGatherInt16Test)
{
    GatherEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefGatherMultiDimFloatTest)
{
    GatherMultiDimEndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefGatherMultiDimUint8Test)
{
    GatherMultiDimEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefGatherMultiDimInt16Test)
{
    GatherMultiDimEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
{
    DequantizeEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
{
    DequantizeEndToEndOffset<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleInt16Test)
{
    DequantizeEndToEndSimple<armnn::DataType::QuantisedSymm16>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetInt16Test)
{
    DequantizeEndToEndOffset<armnn::DataType::QuantisedSymm16>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsTest)
{
    std::vector<float> boxEncodings({
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    });
    std::vector<float> scores({
        0.0f, 0.9f, 0.8f,
        0.0f, 0.75f, 0.72f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.93f, 0.95f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.3f, 0.2f
    });
    std::vector<float> anchors({
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 100.5f, 1.0f, 1.0f
    });
    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
}

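// Quantises a buffer of floats to uint8 using the scale and offset from the supplied TensorInfo.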
inline void QuantizeData(uint8_t* quant, const float* dequant, const TensorInfo& info)
{
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        quant[i] = armnn::Quantize<uint8_t>(dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
}

BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsUint8Test)
{
    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);

    boxEncodingsInfo.SetQuantizationScale(1.0f);
    boxEncodingsInfo.SetQuantizationOffset(1);
    scoresInfo.SetQuantizationScale(0.01f);
    scoresInfo.SetQuantizationOffset(0);
    anchorsInfo.SetQuantizationScale(0.5f);
    anchorsInfo.SetQuantizationOffset(0);

    std::vector<float> boxEncodings({
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    });
    std::vector<float> scores({
        0.0f, 0.9f, 0.8f,
        0.0f, 0.75f, 0.72f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.93f, 0.95f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.3f, 0.2f
    });
    std::vector<float> anchors({
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 100.5f, 1.0f, 1.0f
    });

    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
    std::vector<uint8_t> qScores(scores.size(), 0);
    std::vector<uint8_t> qAnchors(anchors.size(), 0);
    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
    QuantizeData(qScores.data(), scores.data(), scoresInfo);
    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
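    // The trailing arguments below mirror the quantisation (scale, offset) pairs set on the TensorInfos above:
    // box encodings (1.0f, 1), scores (0.01f, 0) and anchors (0.5f, 0).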
    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
                                                                             qScores, qAnchors,
                                                                             1.0f, 1, 0.01f, 0, 0.5f, 0);
}

BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsTest)
{
    std::vector<float> boxEncodings({
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    });
    std::vector<float> scores({
        0.0f, 0.9f, 0.8f,
        0.0f, 0.75f, 0.72f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.93f, 0.95f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.3f, 0.2f
    });
    std::vector<float> anchors({
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 100.5f, 1.0f, 1.0f
    });
    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
}

BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsUint8Test)
{
    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);

    boxEncodingsInfo.SetQuantizationScale(1.0f);
    boxEncodingsInfo.SetQuantizationOffset(1);
    scoresInfo.SetQuantizationScale(0.01f);
    scoresInfo.SetQuantizationOffset(0);
    anchorsInfo.SetQuantizationScale(0.5f);
    anchorsInfo.SetQuantizationOffset(0);

    std::vector<float> boxEncodings({
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    });
    std::vector<float> scores({
        0.0f, 0.9f, 0.8f,
        0.0f, 0.75f, 0.72f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.93f, 0.95f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.3f, 0.2f
    });
    std::vector<float> anchors({
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 100.5f, 1.0f, 1.0f
    });

    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
    std::vector<uint8_t> qScores(scores.size(), 0);
    std::vector<uint8_t> qAnchors(anchors.size(), 0);
    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
    QuantizeData(qScores.data(), scores.data(), scoresInfo);
    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
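    // As above, the trailing arguments mirror the (scale, offset) pairs of the box encoding, score and anchor infos.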
    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
                                                                          qScores, qAnchors,
                                                                          1.0f, 1, 0.01f, 0, 0.5f, 0);
}

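// SpaceToDepth end-to-end tests: block size 2 is used throughout, and for these small single-batch inputs the
// expected output data equals the flattened input data.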
BOOST_AUTO_TEST_CASE(RefSpaceToDepthNHWCEndToEndTest1)
{
    const unsigned int blockSize = 2;

    armnn::TensorShape inputShape{1, 2, 2, 1};
    armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);

    armnn::TensorShape outputShape{1, 1, 1, 4};
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputData = std::vector<float>(
    {
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutputData = std::vector<float>(
    {
        1.0f, 2.0f, 3.0f, 4.0f
    });

    SpaceToDepthEndToEnd(defaultBackends,
                         armnn::DataLayout::NHWC,
                         inputTensorInfo,
                         outputTensorInfo,
                         inputData,
                         expectedOutputData,
                         blockSize);
}

BOOST_AUTO_TEST_CASE(RefSpaceToDepthNCHWEndToEndTest1)
{
    const unsigned int blockSize = 2;

    armnn::TensorShape inputShape{1, 2, 2, 1};
    armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);

    armnn::TensorShape outputShape{1, 1, 1, 4};
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputData = std::vector<float>(
    {
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutputData = std::vector<float>(
    {
        1.0f, 2.0f, 3.0f, 4.0f
    });

    SpaceToDepthEndToEnd(defaultBackends,
                         armnn::DataLayout::NCHW,
                         inputTensorInfo,
                         outputTensorInfo,
                         inputData,
                         expectedOutputData,
                         blockSize);
}

BOOST_AUTO_TEST_CASE(RefSpaceToDepthNHWCEndToEndTest2)
{
    const unsigned int blockSize = 2;

    armnn::TensorShape inputShape{1, 2, 2, 2};
    armnn::TensorShape outputShape{1, 1, 1, 8};

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);

    std::vector<float> inputData = std::vector<float>(
    {
        1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
    });

    std::vector<float> expectedOutputData = std::vector<float>(
    {
        1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
    });

    SpaceToDepthEndToEnd(defaultBackends,
                         armnn::DataLayout::NHWC,
                         inputTensorInfo,
                         outputTensorInfo,
                         inputData,
                         expectedOutputData,
                         blockSize);
}

BOOST_AUTO_TEST_CASE(RefSpaceToDepthNCHWEndToEndTest2)
{
    const unsigned int blockSize = 2;

    armnn::TensorShape inputShape{1, 2, 2, 2};
    armnn::TensorShape outputShape{1, 1, 1, 8};

    armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputData = std::vector<float>(
    {
        1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
    });

    std::vector<float> expectedOutputData = std::vector<float>(
    {
        1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
    });

    SpaceToDepthEndToEnd(defaultBackends,
                         armnn::DataLayout::NCHW,
                         inputTensorInfo,
                         outputTensorInfo,
                         inputData,
                         expectedOutputData,
                         blockSize);
}

BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndTest)
{
    Splitter1dEndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndUint8Test)
{
    Splitter1dEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndTest)
{
    Splitter2dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter2dDim1EndToEndTest)
{
    Splitter2dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndUint8Test)
{
    Splitter2dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter2dDim1EndToEndUint8Test)
{
    Splitter2dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndTest)
{
    Splitter3dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter3dDim1EndToEndTest)
{
    Splitter3dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter3dDim2EndToEndTest)
{
    Splitter3dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndUint8Test)
{
    Splitter3dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter3dDim1EndToEndUint8Test)
{
    Splitter3dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter3dDim2EndToEndUint8Test)
{
    Splitter3dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndTest)
{
    Splitter4dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter4dDim1EndToEndTest)
{
    Splitter4dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter4dDim2EndToEndTest)
{
    Splitter4dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndTest)
{
    Splitter4dDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndUint8Test)
{
    Splitter4dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter4dDim1EndToEndUint8Test)
{
    Splitter4dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter4dDim2EndToEndUint8Test)
{
    Splitter4dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndUint8Test)
{
    Splitter4dDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

// TransposeConvolution2d
BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNchwTest)
{
    TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
        defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NchwTest)
{
    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NchwTest)
{
    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNhwcTest)
{
    TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
        defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NhwcTest)
{
    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NhwcTest)
{
    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        defaultBackends, armnn::DataLayout::NHWC);
}

// Resize Bilinear
BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNchwTest)
{
    ResizeBilinearEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NchwTest)
{
    ResizeBilinearEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NchwTest)
{
    ResizeBilinearEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNhwcTest)
{
    ResizeBilinearEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NhwcTest)
{
    ResizeBilinearEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NhwcTest)
{
    ResizeBilinearEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
}

// Resize NearestNeighbor
BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNchwTest)
{
    ResizeNearestNeighborEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NchwTest)
{
    ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NchwTest)
{
    ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNhwcTest)
{
    ResizeNearestNeighborEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NhwcTest)
{
    ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NhwcTest)
{
    ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_SUITE_END()