blob: 86211229256028705e8f1db30fc7909c4145c936 [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
arovir0143095f32018-10-09 18:04:24 +01005
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00006#include <test/CreateWorkload.hpp>
arovir0143095f32018-10-09 18:04:24 +01007
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00008#include <backendsCommon/CpuTensorHandle.hpp>
9#include <reference/RefWorkloadFactory.hpp>
10#include <reference/workloads/RefWorkloads.hpp>
telsoa014fcda012018-03-09 14:13:49 +000011
12namespace
13{
14
15template<typename Workload>
16void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo)
17{
18 auto queueDescriptor = workload->GetData();
19 auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
20 auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
21 BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
22 BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
23}
24
25template <typename Workload>
26void CheckInputsOutput(std::unique_ptr<Workload> workload,
27 const TensorInfo& inputInfo0,
28 const TensorInfo& inputInfo1,
29 const TensorInfo& outputInfo)
30{
31 auto queueDescriptor = workload->GetData();
32 auto inputHandle0 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
33 auto inputHandle1 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[1]);
34 auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
35 BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
36 BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
37 BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
38}
39}
40
41BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)
42
telsoa01c577f2c2018-08-31 09:22:23 +010043template <typename ActivationWorkloadType, armnn::DataType DataType>
telsoa014fcda012018-03-09 14:13:49 +000044static void RefCreateActivationWorkloadTest()
45{
46 Graph graph;
47 RefWorkloadFactory factory;
telsoa01c577f2c2018-08-31 09:22:23 +010048 auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +000049
telsoa01c577f2c2018-08-31 09:22:23 +010050 // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +000051 CheckInputOutput(std::move(workload),
telsoa01c577f2c2018-08-31 09:22:23 +010052 TensorInfo({ 1, 1 }, DataType),
53 TensorInfo({ 1, 1 }, DataType));
telsoa014fcda012018-03-09 14:13:49 +000054}
55
56BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
57{
telsoa01c577f2c2018-08-31 09:22:23 +010058 RefCreateActivationWorkloadTest<RefActivationFloat32Workload, armnn::DataType::Float32>();
telsoa014fcda012018-03-09 14:13:49 +000059}
60
61BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
62{
telsoa01c577f2c2018-08-31 09:22:23 +010063 RefCreateActivationWorkloadTest<RefActivationUint8Workload, armnn::DataType::QuantisedAsymm8>();
telsoa014fcda012018-03-09 14:13:49 +000064}
65
David Beckbc392452018-09-10 14:47:28 +010066template <typename WorkloadType,
67 typename DescriptorType,
68 typename LayerType,
69 armnn::DataType DataType>
Éanna Ó Catháind57415d2018-11-28 16:24:38 +000070static void RefCreateElementwiseWorkloadTest()
telsoa014fcda012018-03-09 14:13:49 +000071{
72 Graph graph;
73 RefWorkloadFactory factory;
Éanna Ó Catháind57415d2018-11-28 16:24:38 +000074 auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(
75 factory, graph);
telsoa014fcda012018-03-09 14:13:49 +000076
telsoa014fcda012018-03-09 14:13:49 +000077 CheckInputsOutput(std::move(workload),
telsoa01c577f2c2018-08-31 09:22:23 +010078 TensorInfo({ 2, 3 }, DataType),
79 TensorInfo({ 2, 3 }, DataType),
80 TensorInfo({ 2, 3 }, DataType));
telsoa014fcda012018-03-09 14:13:49 +000081}
82
83BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
84{
Éanna Ó Catháind57415d2018-11-28 16:24:38 +000085 RefCreateElementwiseWorkloadTest<RefAdditionFloat32Workload,
86 AdditionQueueDescriptor,
87 AdditionLayer,
88 armnn::DataType::Float32>();
telsoa014fcda012018-03-09 14:13:49 +000089}
90
91BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
92{
Éanna Ó Catháind57415d2018-11-28 16:24:38 +000093 RefCreateElementwiseWorkloadTest<RefAdditionUint8Workload,
94 AdditionQueueDescriptor,
95 AdditionLayer,
96 armnn::DataType::QuantisedAsymm8>();
David Beckbc392452018-09-10 14:47:28 +010097}
98
99BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
100{
Éanna Ó Catháind57415d2018-11-28 16:24:38 +0000101 RefCreateElementwiseWorkloadTest<RefSubtractionFloat32Workload,
102 SubtractionQueueDescriptor,
103 SubtractionLayer,
104 armnn::DataType::Float32>();
David Beckbc392452018-09-10 14:47:28 +0100105}
106
107BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
108{
Éanna Ó Catháind57415d2018-11-28 16:24:38 +0000109 RefCreateElementwiseWorkloadTest<RefSubtractionUint8Workload,
110 SubtractionQueueDescriptor,
111 SubtractionLayer,
112 armnn::DataType::QuantisedAsymm8>();
David Beckbc392452018-09-10 14:47:28 +0100113}
114
115BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
116{
Éanna Ó Catháind57415d2018-11-28 16:24:38 +0000117 RefCreateElementwiseWorkloadTest<RefMultiplicationFloat32Workload,
118 MultiplicationQueueDescriptor,
119 MultiplicationLayer,
120 armnn::DataType::Float32>();
David Beckbc392452018-09-10 14:47:28 +0100121}
122
123BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
124{
Éanna Ó Catháind57415d2018-11-28 16:24:38 +0000125 RefCreateElementwiseWorkloadTest<RefMultiplicationUint8Workload,
126 MultiplicationQueueDescriptor,
127 MultiplicationLayer,
128 armnn::DataType::QuantisedAsymm8>();
David Beckbc392452018-09-10 14:47:28 +0100129}
130
131BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload)
132{
Éanna Ó Catháind57415d2018-11-28 16:24:38 +0000133 RefCreateElementwiseWorkloadTest<RefDivisionFloat32Workload,
134 DivisionQueueDescriptor,
135 DivisionLayer,
136 armnn::DataType::Float32>();
David Beckbc392452018-09-10 14:47:28 +0100137}
138
139BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
140{
Éanna Ó Catháind57415d2018-11-28 16:24:38 +0000141 RefCreateElementwiseWorkloadTest<RefDivisionUint8Workload,
142 DivisionQueueDescriptor,
143 DivisionLayer,
144 armnn::DataType::QuantisedAsymm8>();
telsoa014fcda012018-03-09 14:13:49 +0000145}
146
Matteo Martincigh3dc43032018-10-18 10:55:19 +0100147template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
148static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +0000149{
Matteo Martincigh3dc43032018-10-18 10:55:19 +0100150 Graph graph;
telsoa014fcda012018-03-09 14:13:49 +0000151 RefWorkloadFactory factory;
Matteo Martincigh3dc43032018-10-18 10:55:19 +0100152 auto workload =
153 CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>(factory, graph, dataLayout);
154
155 TensorShape inputShape;
156 TensorShape outputShape;
157
158 switch (dataLayout)
159 {
160 case DataLayout::NHWC:
Nikhil Rajd1340932018-10-18 14:27:50 +0100161 inputShape = { 2, 4, 4, 3 };
162 outputShape = { 2, 4, 4, 3 };
Matteo Martincigh3dc43032018-10-18 10:55:19 +0100163 break;
164 case DataLayout::NCHW:
165 default:
Nikhil Rajd1340932018-10-18 14:27:50 +0100166 inputShape = { 2, 3, 4, 4 };
167 outputShape = { 2, 3, 4, 4 };
Matteo Martincigh3dc43032018-10-18 10:55:19 +0100168 break;
169 }
telsoa014fcda012018-03-09 14:13:49 +0000170
telsoa01c577f2c2018-08-31 09:22:23 +0100171 // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
Matteo Martincigh3dc43032018-10-18 10:55:19 +0100172 CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
173}
174
175BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
176{
177 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationFloat32Workload,armnn::DataType::Float32>
178 (DataLayout::NCHW);
179}
180
181BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
182{
183 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationFloat32Workload, armnn::DataType::Float32>
184 (DataLayout::NHWC);
185}
186
187BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
188{
189 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationUint8Workload, armnn::DataType::QuantisedAsymm8>
190 (DataLayout::NCHW);
191}
192
193BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
194{
195 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationUint8Workload, armnn::DataType::QuantisedAsymm8>
196 (DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +0000197}
198
telsoa01c577f2c2018-08-31 09:22:23 +0100199BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
200{
201 Graph graph;
202 RefWorkloadFactory factory;
203 auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);
204
205 // Checks that outputs and inputs are as we expect them
206 CheckInputOutput(
207 std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
208}
209
210BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
211{
212 Graph graph;
213 RefWorkloadFactory factory;
214 auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);
215
216 // Checks that outputs and inputs are as we expect them
217 CheckInputOutput(
218 std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16));
219}
220
Nikhil Raje4dfd6e2018-10-18 10:11:04 +0100221static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
telsoa014fcda012018-03-09 14:13:49 +0000222{
Nikhil Raje4dfd6e2018-10-18 10:11:04 +0100223 Graph graph;
telsoa014fcda012018-03-09 14:13:49 +0000224 RefWorkloadFactory factory;
Nikhil Raje4dfd6e2018-10-18 10:11:04 +0100225 auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dFloat32Workload, DataType::Float32>
226 (factory, graph, dataLayout);
227
228 std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW) ?
229 std::initializer_list<unsigned int>({2, 3, 8, 16}) : std::initializer_list<unsigned int>({2, 8, 16, 3});
230 std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
231 std::initializer_list<unsigned int>({2, 2, 2, 10}) : std::initializer_list<unsigned int>({2, 2, 10, 2});
telsoa014fcda012018-03-09 14:13:49 +0000232
telsoa01c577f2c2018-08-31 09:22:23 +0100233 // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000234 CheckInputOutput(std::move(workload),
Nikhil Raje4dfd6e2018-10-18 10:11:04 +0100235 TensorInfo(inputShape, DataType::Float32),
236 TensorInfo(outputShape, DataType::Float32));
237}
238
239BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
240{
241 RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
242}
243
244BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
245{
246 RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +0000247}
248
telsoa01c577f2c2018-08-31 09:22:23 +0100249template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
telsoa014fcda012018-03-09 14:13:49 +0000250static void RefCreateFullyConnectedWorkloadTest()
251{
252 Graph graph;
253 RefWorkloadFactory factory;
telsoa01c577f2c2018-08-31 09:22:23 +0100254 auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000255
telsoa01c577f2c2018-08-31 09:22:23 +0100256 // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
257 float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
258 float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
telsoa014fcda012018-03-09 14:13:49 +0000259 CheckInputOutput(std::move(workload),
telsoa01c577f2c2018-08-31 09:22:23 +0100260 TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
261 TensorInfo({ 3, 7 }, DataType, outputQScale));
telsoa014fcda012018-03-09 14:13:49 +0000262}
263
264BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat32Workload)
265{
telsoa01c577f2c2018-08-31 09:22:23 +0100266 RefCreateFullyConnectedWorkloadTest<RefFullyConnectedFloat32Workload, armnn::DataType::Float32>();
telsoa014fcda012018-03-09 14:13:49 +0000267}
268
269BOOST_AUTO_TEST_CASE(CreateFullyConnectedUint8Workload)
270{
telsoa01c577f2c2018-08-31 09:22:23 +0100271 RefCreateFullyConnectedWorkloadTest<RefFullyConnectedUint8Workload, armnn::DataType::QuantisedAsymm8>();
telsoa014fcda012018-03-09 14:13:49 +0000272}
273
narpra0155a97bc2018-10-02 14:35:53 +0100274template <typename NormalizationWorkloadType, armnn::DataType DataType>
Matteo Martincigha160b242018-10-18 10:33:23 +0100275static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +0000276{
narpra0155a97bc2018-10-02 14:35:53 +0100277 Graph graph;
telsoa014fcda012018-03-09 14:13:49 +0000278 RefWorkloadFactory factory;
Matteo Martincigha160b242018-10-18 10:33:23 +0100279 auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
280
281 TensorShape inputShape;
282 TensorShape outputShape;
283
284 switch (dataLayout)
285 {
286 case DataLayout::NHWC:
287 inputShape = { 3, 1, 5, 5 };
288 outputShape = { 3, 1, 5, 5 };
289 break;
290 case DataLayout::NCHW:
291 default:
292 inputShape = { 3, 5, 5, 1 };
293 outputShape = { 3, 5, 5, 1 };
294 break;
295 }
telsoa014fcda012018-03-09 14:13:49 +0000296
telsoa01c577f2c2018-08-31 09:22:23 +0100297 // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
Matteo Martincigha160b242018-10-18 10:33:23 +0100298 CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
narpra0155a97bc2018-10-02 14:35:53 +0100299}
300
301BOOST_AUTO_TEST_CASE(CreateRefNormalizationNchwWorkload)
302{
Matteo Martincigha160b242018-10-18 10:33:23 +0100303 RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
304}
305
306BOOST_AUTO_TEST_CASE(CreateRefNormalizationNhwcWorkload)
307{
308 RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +0000309}
310
telsoa01c577f2c2018-08-31 09:22:23 +0100311template <typename Pooling2dWorkloadType, armnn::DataType DataType>
James Conroy69482272018-10-19 10:41:35 +0100312static void RefCreatePooling2dWorkloadTest(DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +0000313{
314 Graph graph;
315 RefWorkloadFactory factory;
James Conroy69482272018-10-19 10:41:35 +0100316 auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph, dataLayout);
317
318 TensorShape inputShape;
319 TensorShape outputShape;
320
321 switch (dataLayout)
322 {
323 case DataLayout::NHWC:
324 inputShape = { 3, 5, 5, 2 };
325 outputShape = { 3, 2, 4, 2 };
326 break;
327 case DataLayout::NCHW:
328 default:
329 inputShape = { 3, 2, 5, 5 };
330 outputShape = { 3, 2, 2, 4 };
331 }
telsoa014fcda012018-03-09 14:13:49 +0000332
telsoa01c577f2c2018-08-31 09:22:23 +0100333 // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
James Conroy69482272018-10-19 10:41:35 +0100334 CheckInputOutput(std::move(workload),
335 TensorInfo(inputShape, DataType),
336 TensorInfo(outputShape, DataType));
telsoa014fcda012018-03-09 14:13:49 +0000337}
338
339BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
340{
James Conroy69482272018-10-19 10:41:35 +0100341 RefCreatePooling2dWorkloadTest<RefPooling2dFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
342}
343
344BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
345{
346 RefCreatePooling2dWorkloadTest<RefPooling2dFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +0000347}
348
349BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
350{
James Conroy69482272018-10-19 10:41:35 +0100351 RefCreatePooling2dWorkloadTest<RefPooling2dUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
352}
353
354BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
355{
356 RefCreatePooling2dWorkloadTest<RefPooling2dUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +0000357}
358
telsoa01c577f2c2018-08-31 09:22:23 +0100359template <typename SoftmaxWorkloadType, armnn::DataType DataType>
telsoa014fcda012018-03-09 14:13:49 +0000360static void RefCreateSoftmaxWorkloadTest()
361{
362 Graph graph;
363 RefWorkloadFactory factory;
telsoa01c577f2c2018-08-31 09:22:23 +0100364 auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000365
telsoa01c577f2c2018-08-31 09:22:23 +0100366 // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000367 CheckInputOutput(
368 std::move(workload),
telsoa01c577f2c2018-08-31 09:22:23 +0100369 TensorInfo({4, 1}, DataType),
370 TensorInfo({4, 1}, DataType));
telsoa014fcda012018-03-09 14:13:49 +0000371}
372
373BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
374{
telsoa01c577f2c2018-08-31 09:22:23 +0100375 RefCreateSoftmaxWorkloadTest<RefSoftmaxFloat32Workload, armnn::DataType::Float32>();
telsoa014fcda012018-03-09 14:13:49 +0000376}
377
378BOOST_AUTO_TEST_CASE(CreateSoftmaxUint8Workload)
379{
telsoa01c577f2c2018-08-31 09:22:23 +0100380 RefCreateSoftmaxWorkloadTest<RefSoftmaxUint8Workload, armnn::DataType::QuantisedAsymm8>();
telsoa014fcda012018-03-09 14:13:49 +0000381}
382
telsoa01c577f2c2018-08-31 09:22:23 +0100383template <typename SplitterWorkloadType, armnn::DataType DataType>
telsoa014fcda012018-03-09 14:13:49 +0000384static void RefCreateSplitterWorkloadTest()
385{
386 Graph graph;
387 RefWorkloadFactory factory;
telsoa01c577f2c2018-08-31 09:22:23 +0100388 auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000389
telsoa01c577f2c2018-08-31 09:22:23 +0100390 // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000391 SplitterQueueDescriptor queueDescriptor = workload->GetData();
392 auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
telsoa01c577f2c2018-08-31 09:22:23 +0100393 BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
surmeh013537c2c2018-05-18 16:31:43 +0100394
telsoa014fcda012018-03-09 14:13:49 +0000395 auto outputHandle0 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
telsoa01c577f2c2018-08-31 09:22:23 +0100396 BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
surmeh013537c2c2018-05-18 16:31:43 +0100397
telsoa014fcda012018-03-09 14:13:49 +0000398 auto outputHandle1 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[1]);
telsoa01c577f2c2018-08-31 09:22:23 +0100399 BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
surmeh013537c2c2018-05-18 16:31:43 +0100400
telsoa014fcda012018-03-09 14:13:49 +0000401 auto outputHandle2 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[2]);
telsoa01c577f2c2018-08-31 09:22:23 +0100402 BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000403}
404
405BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
406{
telsoa01c577f2c2018-08-31 09:22:23 +0100407 RefCreateSplitterWorkloadTest<RefSplitterFloat32Workload, armnn::DataType::Float32>();
telsoa014fcda012018-03-09 14:13:49 +0000408}
409
410BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
411{
telsoa01c577f2c2018-08-31 09:22:23 +0100412 RefCreateSplitterWorkloadTest<RefSplitterUint8Workload, armnn::DataType::QuantisedAsymm8>();
telsoa014fcda012018-03-09 14:13:49 +0000413}
414
telsoa01c577f2c2018-08-31 09:22:23 +0100415template <typename SplitterWorkloadType, typename MergerWorkloadType, armnn::DataType DataType>
telsoa014fcda012018-03-09 14:13:49 +0000416static void RefCreateSplitterMergerWorkloadTest()
417{
telsoa01c577f2c2018-08-31 09:22:23 +0100418 // Tests that it is possible to decide which output of the splitter layer
419 // should be lined to which input of the merger layer.
420 // We tested that is is possible to specify 0th output
421 // of the splitter to be the 1st input to the merger and the 1st output of the splitter to be 0th input
telsoa014fcda012018-03-09 14:13:49 +0000422 // of the merger.
423
424 Graph graph;
425 RefWorkloadFactory factory;
telsoa01c577f2c2018-08-31 09:22:23 +0100426 auto workloads = CreateSplitterMergerWorkloadTest<SplitterWorkloadType, MergerWorkloadType, DataType>
427 (factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000428
429 auto wlSplitter = std::move(workloads.first);
430 auto wlMerger = std::move(workloads.second);
431
telsoa01c577f2c2018-08-31 09:22:23 +0100432 //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
telsoa014fcda012018-03-09 14:13:49 +0000433 armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
434 armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
435 armnn::CpuTensorHandle* mIn0 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
436 armnn::CpuTensorHandle* mIn1 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[1]);
437
438 BOOST_TEST(sOut0);
439 BOOST_TEST(sOut1);
440 BOOST_TEST(mIn0);
441 BOOST_TEST(mIn1);
442
443 bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
444
445 BOOST_TEST(validDataPointers);
446}
447
448BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat32)
449{
telsoa01c577f2c2018-08-31 09:22:23 +0100450 RefCreateSplitterMergerWorkloadTest<RefSplitterFloat32Workload, RefMergerFloat32Workload, DataType::Float32>();
telsoa014fcda012018-03-09 14:13:49 +0000451}
452
453BOOST_AUTO_TEST_CASE(CreateSplitterMergerUint8)
454{
telsoa01c577f2c2018-08-31 09:22:23 +0100455 RefCreateSplitterMergerWorkloadTest<RefSplitterUint8Workload, RefMergerUint8Workload, DataType::QuantisedAsymm8>();
telsoa014fcda012018-03-09 14:13:49 +0000456}
457
telsoa01c577f2c2018-08-31 09:22:23 +0100458template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
telsoa014fcda012018-03-09 14:13:49 +0000459static void RefCreateSingleOutputMultipleInputsTest()
460{
telsoa01c577f2c2018-08-31 09:22:23 +0100461 // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
462 // We created a splitter with two outputs. That each of those outputs is used by two different activation layers.
telsoa014fcda012018-03-09 14:13:49 +0000463
464 Graph graph;
465 RefWorkloadFactory factory;
466 std::unique_ptr<SplitterWorkloadType> wlSplitter;
467 std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
468 std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
469 std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
470 std::unique_ptr<ActivationWorkloadType> wlActiv1_1;
471
472 CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
telsoa01c577f2c2018-08-31 09:22:23 +0100473 ActivationWorkloadType, DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);
telsoa014fcda012018-03-09 14:13:49 +0000474
475 armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
476 armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
477 armnn::CpuTensorHandle* activ0_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
478 armnn::CpuTensorHandle* activ0_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
479 armnn::CpuTensorHandle* activ1_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
480 armnn::CpuTensorHandle* activ1_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
481
482
483 BOOST_TEST(sOut0);
484 BOOST_TEST(sOut1);
485 BOOST_TEST(activ0_0Im);
486 BOOST_TEST(activ0_1Im);
487 BOOST_TEST(activ1_0Im);
488 BOOST_TEST(activ1_1Im);
489
490 bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
491 (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
492
493 BOOST_TEST(validDataPointers);
494}
495
496BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
497{
telsoa01c577f2c2018-08-31 09:22:23 +0100498 RefCreateSingleOutputMultipleInputsTest<RefSplitterFloat32Workload, RefActivationFloat32Workload,
499 armnn::DataType::Float32>();
telsoa014fcda012018-03-09 14:13:49 +0000500}
501
502BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
503{
telsoa01c577f2c2018-08-31 09:22:23 +0100504 RefCreateSingleOutputMultipleInputsTest<RefSplitterUint8Workload, RefActivationUint8Workload,
505 armnn::DataType::QuantisedAsymm8>();
telsoa014fcda012018-03-09 14:13:49 +0000506}
507
telsoa01c577f2c2018-08-31 09:22:23 +0100508template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
James Conroy59540822018-10-11 12:39:05 +0100509static void RefCreateResizeBilinearTest(DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +0000510{
511 Graph graph;
512 RefWorkloadFactory factory;
James Conroy59540822018-10-11 12:39:05 +0100513 auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
514
515 TensorShape inputShape;
516 TensorShape outputShape;
517
518 switch (dataLayout)
519 {
520 case DataLayout::NHWC:
521 inputShape = { 2, 4, 4, 3 };
522 outputShape = { 2, 2, 2, 3 };
523 break;
James Conroy69482272018-10-19 10:41:35 +0100524 case DataLayout::NCHW:
525 default:
James Conroy59540822018-10-11 12:39:05 +0100526 inputShape = { 2, 3, 4, 4 };
527 outputShape = { 2, 3, 2, 2 };
528 }
telsoa014fcda012018-03-09 14:13:49 +0000529
telsoa01c577f2c2018-08-31 09:22:23 +0100530 // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
James Conroy69482272018-10-19 10:41:35 +0100531 CheckInputOutput(std::move(workload),
532 TensorInfo(inputShape, DataType),
533 TensorInfo(outputShape, DataType));
telsoa014fcda012018-03-09 14:13:49 +0000534}
535
536BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
537{
James Conroy59540822018-10-11 12:39:05 +0100538 RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
telsoa014fcda012018-03-09 14:13:49 +0000539}
540
541BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
542{
James Conroy59540822018-10-11 12:39:05 +0100543 RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
544}
545
546BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
547{
548 RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +0000549}
550
Matteo Martincighb63973e2018-10-16 16:23:33 +0100551template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
552static void RefCreateL2NormalizationTest(DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +0000553{
554 Graph graph;
555 RefWorkloadFactory factory;
Matteo Martincighb63973e2018-10-16 16:23:33 +0100556 auto workload =
557 CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
558
559 TensorShape inputShape;
560 TensorShape outputShape;
561
562 switch (dataLayout)
563 {
564 case DataLayout::NHWC:
565 inputShape = { 5, 50, 67, 20 };
566 outputShape = { 5, 50, 67, 20 };
567 break;
568 case DataLayout::NCHW:
569 default:
570 inputShape = { 5, 20, 50, 67 };
571 outputShape = { 5, 20, 50, 67 };
572 break;
573 }
telsoa014fcda012018-03-09 14:13:49 +0000574
telsoa01c577f2c2018-08-31 09:22:23 +0100575 // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
Matteo Martincighb63973e2018-10-16 16:23:33 +0100576 CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
577}
578
579BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
580{
581 RefCreateL2NormalizationTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
582}
583
584BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
585{
586 RefCreateL2NormalizationTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +0000587}
588
telsoa01c577f2c2018-08-31 09:22:23 +0100589template <typename ReshapeWorkloadType, armnn::DataType DataType>
telsoa014fcda012018-03-09 14:13:49 +0000590static void RefCreateReshapeWorkloadTest()
591{
592 Graph graph;
593 RefWorkloadFactory factory;
telsoa01c577f2c2018-08-31 09:22:23 +0100594 auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000595
telsoa01c577f2c2018-08-31 09:22:23 +0100596 // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000597 CheckInputOutput(
598 std::move(workload),
telsoa01c577f2c2018-08-31 09:22:23 +0100599 TensorInfo({ 4, 1 }, DataType),
600 TensorInfo({ 1, 4 }, DataType));
telsoa014fcda012018-03-09 14:13:49 +0000601}
602
603BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload)
604{
telsoa01c577f2c2018-08-31 09:22:23 +0100605 RefCreateReshapeWorkloadTest<RefReshapeFloat32Workload, armnn::DataType::Float32>();
telsoa014fcda012018-03-09 14:13:49 +0000606}
607
608BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
609{
telsoa01c577f2c2018-08-31 09:22:23 +0100610 RefCreateReshapeWorkloadTest<RefReshapeUint8Workload, armnn::DataType::QuantisedAsymm8>();
telsoa014fcda012018-03-09 14:13:49 +0000611}
612
narpra015cdda352018-11-19 15:30:27 +0000613template <typename MergerWorkloadType, armnn::DataType DataType>
614static void RefCreateMergerWorkloadTest(const armnn::TensorShape& outputShape,
615 unsigned int concatAxis)
616{
617 Graph graph;
618 RefWorkloadFactory factory;
619 auto workload = CreateMergerWorkloadTest<MergerWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
620
621 CheckInputsOutput(std::move(workload),
622 TensorInfo({ 2, 3, 2, 5 }, DataType),
623 TensorInfo({ 2, 3, 2, 5 }, DataType),
624 TensorInfo(outputShape, DataType));
625}
626
627BOOST_AUTO_TEST_CASE(CreateMergerDim0Float32Workload)
628{
629 RefCreateMergerWorkloadTest<RefMergerFloat32Workload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
630}
631
632BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint8Workload)
633{
634 RefCreateMergerWorkloadTest<RefMergerUint8Workload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
635}
636
637BOOST_AUTO_TEST_CASE(CreateMergerDim1Float32Workload)
638{
639 RefCreateMergerWorkloadTest<RefMergerFloat32Workload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
640}
641
642BOOST_AUTO_TEST_CASE(CreateMergerDim1Uint8Workload)
643{
644 RefCreateMergerWorkloadTest<RefMergerUint8Workload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
645}
646
647BOOST_AUTO_TEST_CASE(CreateMergerDim2Float32Workload)
648{
649 RefCreateMergerWorkloadTest<RefMergerFloat32Workload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
650}
651
652BOOST_AUTO_TEST_CASE(CreateMergerDim2Uint8Workload)
653{
654 RefCreateMergerWorkloadTest<RefMergerUint8Workload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 4, 5 }, 2);
655}
656
657BOOST_AUTO_TEST_CASE(CreateMergerDim3Float32Workload)
658{
659 RefCreateMergerWorkloadTest<RefMergerFloat32Workload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
660}
661
662BOOST_AUTO_TEST_CASE(CreateMergerDim3Uint8Workload)
663{
664 RefCreateMergerWorkloadTest<RefMergerUint8Workload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
665}
666
telsoa014fcda012018-03-09 14:13:49 +0000667BOOST_AUTO_TEST_SUITE_END()