//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <test/CreateWorkload.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <reference/workloads/RefWorkloads.hpp>

namespace
{

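// Helper used by the single-input tests below: pulls the queue descriptor out of the created
// workload and checks that the first input and output tensor handles carry the expected TensorInfo.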
template<typename Workload>
void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo)
{
    auto queueDescriptor = workload->GetData();
    auto inputHandle  = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
}

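// Two-input variant of CheckInputOutput, used by the elementwise and concat tests.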
template <typename Workload>
void CheckInputsOutput(std::unique_ptr<Workload> workload,
                       const TensorInfo& inputInfo0,
                       const TensorInfo& inputInfo1,
                       const TensorInfo& outputInfo)
{
    auto queueDescriptor = workload->GetData();
    auto inputHandle0 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto inputHandle1 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[1]);
    auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
    BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
}

}

BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)

template <typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateActivationWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);

    // Checks that inputs and outputs are as we expect them (see definition of CreateActivationWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo({ 1, 1 }, DataType),
                     TensorInfo({ 1, 1 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
{
    RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
{
    RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QuantisedAsymm8>();
}

template <typename WorkloadType,
          typename DescriptorType,
          typename LayerType,
          armnn::DataType DataType>
static void RefCreateElementwiseWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(
        factory, graph);

    CheckInputsOutput(std::move(workload),
                      TensorInfo({ 2, 3 }, DataType),
                      TensorInfo({ 2, 3 }, DataType),
                      TensorInfo({ 2, 3 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
    RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
                                     AdditionQueueDescriptor,
                                     AdditionLayer,
                                     armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
{
    RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
                                     AdditionQueueDescriptor,
                                     AdditionLayer,
                                     armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
{
    RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
                                     AdditionQueueDescriptor,
                                     AdditionLayer,
                                     armnn::DataType::QuantisedSymm16>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
{
    RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
                                     SubtractionQueueDescriptor,
                                     SubtractionLayer,
                                     armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
{
    RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
                                     SubtractionQueueDescriptor,
                                     SubtractionLayer,
                                     armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
{
    RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
                                     SubtractionQueueDescriptor,
                                     SubtractionLayer,
                                     armnn::DataType::QuantisedSymm16>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
{
    RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
                                     MultiplicationQueueDescriptor,
                                     MultiplicationLayer,
                                     armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
{
    RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
                                     MultiplicationQueueDescriptor,
                                     MultiplicationLayer,
                                     armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
{
    RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
                                     MultiplicationQueueDescriptor,
                                     MultiplicationLayer,
                                     armnn::DataType::QuantisedSymm16>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload)
{
    RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
                                     DivisionQueueDescriptor,
                                     DivisionLayer,
                                     armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
{
    RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
                                     DivisionQueueDescriptor,
                                     DivisionLayer,
                                     armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
{
    RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
                                     DivisionQueueDescriptor,
                                     DivisionLayer,
                                     armnn::DataType::QuantisedSymm16>();
}

template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>(factory,
                                                                                                   graph,
                                                                                                   dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 2, 4, 4, 3 };
            outputShape = { 2, 4, 4, 3 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 2, 3, 4, 4 };
            outputShape = { 2, 3, 4, 4 };
            break;
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
    CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float32>
            (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float32>
            (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedAsymm8>
            (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedAsymm8>
            (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedSymm16>
            (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedSymm16>
            (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16));
}

static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dWorkload, DataType::Float32>
                    (factory, graph, dataLayout);

    std::initializer_list<unsigned int> inputShape  = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 3, 8, 16}) : std::initializer_list<unsigned int>({2, 8, 16, 3});
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 2, 2, 10}) : std::initializer_list<unsigned int>({2, 2, 10, 2});

    // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo(inputShape, DataType::Float32),
                     TensorInfo(outputShape, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
}

static void RefCreateDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateDepthwiseConvolution2dWorkloadTest<RefDepthwiseConvolution2dWorkload, DataType::Float32>
                    (factory, graph, dataLayout);

    std::initializer_list<unsigned int> inputShape  = (dataLayout == DataLayout::NCHW)
                                                      ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
                                                      : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW)
                                                      ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
                                                      : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
    // Checks that inputs/outputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo(inputShape, DataType::Float32),
                     TensorInfo(outputShape, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
{
    RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC);
}

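// Speculative extra case, not in the original suite: the helper above already handles the NCHW
// branch, so this assumes CreateDepthwiseConvolution2dWorkloadTest accepts DataLayout::NCHW for
// the reference backend as well. Sketch only.
BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NchwWorkload)
{
    RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NCHW);
}
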
template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
static void RefCreateFullyConnectedWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
    float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0f;
    float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0f;
    CheckInputOutput(std::move(workload),
                     TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
                     TensorInfo({ 3, 7 }, DataType, outputQScale));
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedSymm16>();
}

template <typename NormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 3, 1, 5, 5 };
            outputShape = { 3, 1, 5, 5 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 3, 5, 5, 1 };
            outputShape = { 3, 5, 5, 1 };
            break;
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
    CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NHWC);
}

template <typename Pooling2dWorkloadType, armnn::DataType DataType>
static void RefCreatePooling2dWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 3, 5, 5, 2 };
            outputShape = { 3, 2, 4, 2 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 3, 2, 5, 5 };
            outputShape = { 3, 2, 2, 4 };
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo(inputShape, DataType),
                     TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dInt16Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dInt16NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NHWC);
}

template <typename SoftmaxWorkloadType, armnn::DataType DataType>
static void RefCreateSoftmaxWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({4, 1}, DataType),
        TensorInfo({4, 1}, DataType));
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedSymm16>();
}

template <typename SplitterWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);

    // Checks that the input and outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));

    auto outputHandle0 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));

    auto outputHandle1 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));

    auto outputHandle2 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[2]);
    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QuantisedAsymm8>();
}

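// Speculative extra case, not in the original suite: assumes RefSplitterWorkload handles
// QuantisedSymm16 tensors the same way it handles the Float32 and QuantisedAsymm8 cases above.
BOOST_AUTO_TEST_CASE(CreateSplitterInt16Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QuantisedSymm16>();
}
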
template <typename SplitterWorkloadType, typename ConcatWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterConcatWorkloadTest()
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the concat layer.
    // We test that it is possible to specify the 0th output
    // of the splitter to be the 1st input to the concat and the 1st output of the splitter to be the 0th input
    // of the concat.

    Graph graph;
    RefWorkloadFactory factory;
    auto workloads = CreateSplitterConcatWorkloadTest<SplitterWorkloadType, ConcatWorkloadType, DataType>
            (factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlConcat   = std::move(workloads.second);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* mIn0  = dynamic_cast<armnn::CpuTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* mIn1  = dynamic_cast<armnn::CpuTensorHandle*>(wlConcat->GetData().m_Inputs[1]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
{
    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
{
    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateSingleOutputMultipleInputsTest()
{
    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We create a splitter with two outputs and check that each of those outputs is used by two different
    // activation layers.

    Graph graph;
    RefWorkloadFactory factory;
    std::unique_ptr<SplitterWorkloadType> wlSplitter;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
        ActivationWorkloadType, DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);

    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* activ0_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ0_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
        armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
        armnn::DataType::QuantisedAsymm8>();
}

template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
static void RefCreateResizeBilinearTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 2, 4, 4, 3 };
            outputShape = { 2, 2, 2, 3 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 2, 3, 4, 4 };
            outputShape = { 2, 3, 2, 2 };
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo(inputShape, DataType),
                     TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
{
    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
    RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
{
    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}

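// Speculative extra case, not in the original suite: mirrors the Float32 NHWC test above and
// assumes RefResizeBilinearUint8Workload honours DataLayout::NHWC in the same way.
BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8Nhwc)
{
    RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
}
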
template <typename RsqrtWorkloadType, armnn::DataType DataType>
static void RefCreateRsqrtTest()
{
    Graph graph;
    RefWorkloadFactory factory;

    auto workload = CreateRsqrtWorkloadTest<RsqrtWorkloadType, DataType>(factory, graph);

    // Checks that the input and output are as we expect them (see definition of CreateRsqrtWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo({ 1, 1 }, DataType),
                     TensorInfo({ 1, 1 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32)
{
    RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateRsqrtUint8)
{
    RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QuantisedAsymm8>();
}

template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateL2NormalizationTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload =
            CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 5, 50, 67, 20 };
            outputShape = { 5, 50, 67, 20 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 5, 20, 50, 67 };
            outputShape = { 5, 20, 50, 67 };
            break;
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
    CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
}

template <typename ReshapeWorkloadType, armnn::DataType DataType>
static void RefCreateReshapeWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({ 4, 1 }, DataType),
        TensorInfo({ 1, 4 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedSymm16>();
}

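// The concat tests below join two { 2, 3, 2, 5 } inputs along the given axis, so the expected
// output shape is doubled along that axis.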
template <typename ConcatWorkloadType, armnn::DataType DataType>
static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape,
                                        unsigned int concatAxis)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);

    CheckInputsOutput(std::move(workload),
                      TensorInfo({ 2, 3, 2, 5 }, DataType),
                      TensorInfo({ 2, 3, 2, 5 }, DataType),
                      TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedSymm16>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 4, 5 }, 2);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
}

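// Constant workloads have no inputs, so only the output tensor handle is checked here.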
template <typename ConstantWorkloadType, armnn::DataType DataType>
static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape)
{
    armnn::Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConstantWorkloadTest<ConstantWorkloadType, DataType>(factory, graph, outputShape);

    // Check output is as expected
    auto queueDescriptor = workload->GetData();
    auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
}

BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
{
    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 });
}

BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
{
    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QuantisedSymm16>({ 2, 3, 2, 10 });
}

BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
{
    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 });
}

BOOST_AUTO_TEST_CASE(CreateConstantSigned32Workload)
{
    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Signed32>({ 2, 3, 2, 10 });
}

BOOST_AUTO_TEST_SUITE_END()