//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <test/CreateWorkload.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <reference/workloads/RefWorkloads.hpp>

namespace
{

template<typename Workload>
void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo)
{
    auto queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
}

template <typename Workload>
void CheckInputsOutput(std::unique_ptr<Workload> workload,
                       const TensorInfo& inputInfo0,
                       const TensorInfo& inputInfo1,
                       const TensorInfo& outputInfo)
{
    auto queueDescriptor = workload->GetData();
    auto inputHandle0 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto inputHandle1 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[1]);
    auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
    BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
}
}

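// A minimal usage sketch of the two helpers above, mirroring the tests that follow (the exact
// workload type and tensor shapes depend on which Create*WorkloadTest helper is being exercised):
//
//     Graph graph;
//     RefWorkloadFactory factory;
//     auto workload = CreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::Float32>(factory, graph);
//     CheckInputOutput(std::move(workload),
//                      TensorInfo({ 1, 1 }, armnn::DataType::Float32),
//                      TensorInfo({ 1, 1 }, armnn::DataType::Float32));
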
BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)

template <typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateActivationWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);

    // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo({ 1, 1 }, DataType),
                     TensorInfo({ 1, 1 }, DataType));
}
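
// Note: the Create*WorkloadTest helpers used throughout this file are defined in test/CreateWorkload.hpp.
// Roughly (a sketch rather than the exact implementation), each helper adds the layer under test plus
// input and output layers to the graph, connects them with the tensor infos that the checks in this file
// expect, creates the tensor handles, and asks the factory for the workload, which is returned for inspection.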

BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
{
    RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
{
    RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QuantisedAsymm8>();
}

template <typename WorkloadType,
          typename DescriptorType,
          typename LayerType,
          armnn::DataType DataType>
static void RefCreateElementwiseWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(
        factory, graph);

    CheckInputsOutput(std::move(workload),
                      TensorInfo({ 2, 3 }, DataType),
                      TensorInfo({ 2, 3 }, DataType),
                      TensorInfo({ 2, 3 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
    RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
                                     AdditionQueueDescriptor,
                                     AdditionLayer,
                                     armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
{
    RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
                                     AdditionQueueDescriptor,
                                     AdditionLayer,
                                     armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
{
    RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
                                     AdditionQueueDescriptor,
                                     AdditionLayer,
                                     armnn::DataType::QuantisedSymm16>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
{
    RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
                                     SubtractionQueueDescriptor,
                                     SubtractionLayer,
                                     armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
{
    RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
                                     SubtractionQueueDescriptor,
                                     SubtractionLayer,
                                     armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
{
    RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
                                     SubtractionQueueDescriptor,
                                     SubtractionLayer,
                                     armnn::DataType::QuantisedSymm16>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
{
    RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
                                     MultiplicationQueueDescriptor,
                                     MultiplicationLayer,
                                     armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
{
    RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
                                     MultiplicationQueueDescriptor,
                                     MultiplicationLayer,
                                     armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
{
    RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
                                     MultiplicationQueueDescriptor,
                                     MultiplicationLayer,
                                     armnn::DataType::QuantisedSymm16>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload)
{
    RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
                                     DivisionQueueDescriptor,
                                     DivisionLayer,
                                     armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
{
    RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
                                     DivisionQueueDescriptor,
                                     DivisionLayer,
                                     armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
{
    RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
                                     DivisionQueueDescriptor,
                                     DivisionLayer,
                                     armnn::DataType::QuantisedSymm16>();
}

template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload =
        CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 2, 4, 4, 3 };
            outputShape = { 2, 4, 4, 3 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 2, 3, 4, 4 };
            outputShape = { 2, 3, 4, 4 };
            break;
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
    CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationFloat32Workload, armnn::DataType::Float32>
        (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationFloat32Workload, armnn::DataType::Float32>
        (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationUint8Workload, armnn::DataType::QuantisedAsymm8>
        (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationUint8Workload, armnn::DataType::QuantisedAsymm8>
        (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them.
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them.
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16));
}

static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dWorkload, DataType::Float32>
                    (factory, graph, dataLayout);

    std::initializer_list<unsigned int> inputShape  = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 3, 8, 16}) : std::initializer_list<unsigned int>({2, 8, 16, 3});
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 2, 2, 10}) : std::initializer_list<unsigned int>({2, 2, 10, 2});

    // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo(inputShape, DataType::Float32),
                     TensorInfo(outputShape, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
}

static void RefCreateDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateDepthwiseConvolution2dWorkloadTest<RefDepthwiseConvolution2dWorkload, DataType::Float32>
                    (factory, graph, dataLayout);

    std::initializer_list<unsigned int> inputShape  = (dataLayout == DataLayout::NCHW)
        ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
        : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW)
        ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
        : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
    // Checks that inputs/outputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo(inputShape, DataType::Float32),
                     TensorInfo(outputShape, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
{
    RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC);
}

template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
static void RefCreateFullyConnectedWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
    float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0f;
    float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0f;
    CheckInputOutput(std::move(workload),
                     TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
                     TensorInfo({ 3, 7 }, DataType, outputQScale));
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedSymm16>();
}

template <typename NormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 3, 1, 5, 5 };
            outputShape = { 3, 1, 5, 5 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 3, 5, 5, 1 };
            outputShape = { 3, 5, 5, 1 };
            break;
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
    CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationNchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationNhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}

template <typename Pooling2dWorkloadType, armnn::DataType DataType>
static void RefCreatePooling2dWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 3, 5, 5, 2 };
            outputShape = { 3, 2, 4, 2 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 3, 2, 5, 5 };
            outputShape = { 3, 2, 2, 4 };
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo(inputShape, DataType),
                     TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
}

template <typename SoftmaxWorkloadType, armnn::DataType DataType>
static void RefCreateSoftmaxWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({4, 1}, DataType),
        TensorInfo({4, 1}, DataType));
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedSymm16>();
}

template <typename SplitterWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);

    // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));

    auto outputHandle0 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));

    auto outputHandle1 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));

    auto outputHandle2 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[2]);
    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, typename ConcatWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterConcatWorkloadTest()
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the concat layer. We test that it is
    // possible to specify the 0th output of the splitter to be the 1st input to
    // the concat, and the 1st output of the splitter to be the 0th input of the concat.

    Graph graph;
    RefWorkloadFactory factory;
    auto workloads = CreateSplitterConcatWorkloadTest<SplitterWorkloadType, ConcatWorkloadType, DataType>
        (factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlConcat = std::move(workloads.second);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* mIn0 = dynamic_cast<armnn::CpuTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* mIn1 = dynamic_cast<armnn::CpuTensorHandle*>(wlConcat->GetData().m_Inputs[1]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
{
    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
{
    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateSingleOutputMultipleInputsTest()
{
    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We create a splitter with two outputs, and each of those outputs is used by two different activation layers.

    Graph graph;
    RefWorkloadFactory factory;
    std::unique_ptr<SplitterWorkloadType> wlSplitter;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
        ActivationWorkloadType, DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);

    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* activ0_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ0_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
        armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
        armnn::DataType::QuantisedAsymm8>();
}

template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
static void RefCreateResizeBilinearTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 2, 4, 4, 3 };
            outputShape = { 2, 2, 2, 3 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 2, 3, 4, 4 };
            outputShape = { 2, 3, 2, 2 };
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo(inputShape, DataType),
                     TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
{
    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
    RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
{
    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}

template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateL2NormalizationTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload =
        CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 5, 50, 67, 20 };
            outputShape = { 5, 50, 67, 20 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 5, 20, 50, 67 };
            outputShape = { 5, 20, 50, 67 };
            break;
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
    CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
{
    RefCreateL2NormalizationTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}

template <typename ReshapeWorkloadType, armnn::DataType DataType>
static void RefCreateReshapeWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({ 4, 1 }, DataType),
        TensorInfo({ 1, 4 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedSymm16>();
}

template <typename ConcatWorkloadType, armnn::DataType DataType>
static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape,
                                        unsigned int concatAxis)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);

    CheckInputsOutput(std::move(workload),
                      TensorInfo({ 2, 3, 2, 5 }, DataType),
                      TensorInfo({ 2, 3, 2, 5 }, DataType),
                      TensorInfo(outputShape, DataType));
}

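// The cases below concatenate two { 2, 3, 2, 5 } inputs along the given axis, so the expected output
// shape doubles that axis: axis 0 -> { 4, 3, 2, 5 }, axis 1 -> { 2, 6, 2, 5 },
// axis 2 -> { 2, 3, 4, 5 }, axis 3 -> { 2, 3, 2, 10 }.
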
BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedSymm16>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 4, 5 }, 2);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
}
742
Nina Drozd58ef2c62019-05-16 12:09:18 +0100743template <typename ConstantWorkloadType, armnn::DataType DataType>
744static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape)
745{
746 armnn::Graph graph;
747 RefWorkloadFactory factory;
748 auto workload = CreateConstantWorkloadTest<ConstantWorkloadType, DataType>(factory, graph, outputShape);
749
750 // Check output is as expected
751 auto queueDescriptor = workload->GetData();
752 auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
753 BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
754}
755
756BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
757{
758 RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 });
759}
760
761BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
762{
763 RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QuantisedSymm16>({ 2, 3, 2, 10 });
764}
765
766BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
767{
768 RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 });
769}
770
771BOOST_AUTO_TEST_CASE(CreateConstantSigned32Workload)
772{
773 RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Signed32>({ 2, 3, 2, 10 });
774}
775
telsoa014fcda012018-03-09 14:13:49 +0000776BOOST_AUTO_TEST_SUITE_END()