//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <test/CreateWorkload.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <reference/workloads/RefWorkloads.hpp>

namespace
{

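// Checks that the workload's queue descriptor exposes the expected TensorInfo
// on its first input and first output tensor handles.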
template<typename Workload>
void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo)
{
    auto queueDescriptor = workload->GetData();
    auto inputHandle  = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
}

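// Same check for workloads with two inputs (used by the element-wise arithmetic tests below).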
template <typename Workload>
void CheckInputsOutput(std::unique_ptr<Workload> workload,
                       const TensorInfo&         inputInfo0,
                       const TensorInfo&         inputInfo1,
                       const TensorInfo&         outputInfo)
{
    auto queueDescriptor = workload->GetData();
    auto inputHandle0 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto inputHandle1 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[1]);
    auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
    BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
}
} // anonymous namespace

BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)

template <typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateActivationWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);

    // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo({ 1, 1 }, DataType),
                     TensorInfo({ 1, 1 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
{
    RefCreateActivationWorkloadTest<RefActivationFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
{
    RefCreateActivationWorkloadTest<RefActivationUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

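// Shared helper for the element-wise arithmetic workloads (Addition, Subtraction, Multiplication, Division);
// they all use matching { 2, 3 } shapes for both inputs and the output.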
template <typename WorkloadType,
          typename DescriptorType,
          typename LayerType,
          armnn::DataType DataType>
static void RefCreateArithmeticWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateArithmeticWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);

    CheckInputsOutput(std::move(workload),
                      TensorInfo({ 2, 3 }, DataType),
                      TensorInfo({ 2, 3 }, DataType),
                      TensorInfo({ 2, 3 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
    RefCreateArithmeticWorkloadTest<RefAdditionFloat32Workload,
                                    AdditionQueueDescriptor,
                                    AdditionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
{
    RefCreateArithmeticWorkloadTest<RefAdditionUint8Workload,
                                    AdditionQueueDescriptor,
                                    AdditionLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
{
    RefCreateArithmeticWorkloadTest<RefSubtractionFloat32Workload,
                                    SubtractionQueueDescriptor,
                                    SubtractionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
{
    RefCreateArithmeticWorkloadTest<RefSubtractionUint8Workload,
                                    SubtractionQueueDescriptor,
                                    SubtractionLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
{
    RefCreateArithmeticWorkloadTest<RefMultiplicationFloat32Workload,
                                    MultiplicationQueueDescriptor,
                                    MultiplicationLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
{
    RefCreateArithmeticWorkloadTest<RefMultiplicationUint8Workload,
                                    MultiplicationQueueDescriptor,
                                    MultiplicationLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload)
{
    RefCreateArithmeticWorkloadTest<RefDivisionFloat32Workload,
                                    DivisionQueueDescriptor,
                                    DivisionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
{
    RefCreateArithmeticWorkloadTest<RefDivisionUint8Workload,
                                    DivisionQueueDescriptor,
                                    DivisionLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload =
        CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

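    // NHWC orders the shape as { N, H, W, C }; NCHW as { N, C, H, W }.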
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 2, 4, 4, 3 };
            outputShape = { 2, 4, 4, 3 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 2, 3, 4, 4 };
            outputShape = { 2, 3, 4, 4 };
            break;
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
    CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationFloat32Workload, armnn::DataType::Float32>
        (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationFloat32Workload, armnn::DataType::Float32>
        (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationUint8Workload, armnn::DataType::QuantisedAsymm8>
        (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationUint8Workload, armnn::DataType::QuantisedAsymm8>
        (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them.
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them.
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16));
}

static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dFloat32Workload, DataType::Float32>
                    (factory, graph, dataLayout);

    std::initializer_list<unsigned int> inputShape  = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 3, 8, 16}) : std::initializer_list<unsigned int>({2, 8, 16, 3});
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 2, 2, 10}) : std::initializer_list<unsigned int>({2, 2, 10, 2});

    // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo(inputShape, DataType::Float32),
                     TensorInfo(outputShape, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
}
247
telsoa01c577f2c2018-08-31 09:22:23 +0100248template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
telsoa014fcda012018-03-09 14:13:49 +0000249static void RefCreateFullyConnectedWorkloadTest()
250{
251 Graph graph;
252 RefWorkloadFactory factory;
telsoa01c577f2c2018-08-31 09:22:23 +0100253 auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000254
telsoa01c577f2c2018-08-31 09:22:23 +0100255 // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
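    // The quantised (Uint8) variants use non-zero quantisation scales; the float variants leave them at 0.0f.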
    float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0f;
    float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0f;
    CheckInputOutput(std::move(workload),
                     TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
                     TensorInfo({ 3, 7 }, DataType, outputQScale));
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat32Workload)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedUint8Workload)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename NormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 3, 1, 5, 5 };
            outputShape = { 3, 1, 5, 5 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 3, 5, 5, 1 };
            outputShape = { 3, 5, 5, 1 };
            break;
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
    CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationNchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationNhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}
309
telsoa01c577f2c2018-08-31 09:22:23 +0100310template <typename Pooling2dWorkloadType, armnn::DataType DataType>
James Conroy69482272018-10-19 10:41:35 +0100311static void RefCreatePooling2dWorkloadTest(DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +0000312{
313 Graph graph;
314 RefWorkloadFactory factory;
James Conroy69482272018-10-19 10:41:35 +0100315 auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph, dataLayout);
316
317 TensorShape inputShape;
318 TensorShape outputShape;
319
320 switch (dataLayout)
321 {
322 case DataLayout::NHWC:
323 inputShape = { 3, 5, 5, 2 };
324 outputShape = { 3, 2, 4, 2 };
325 break;
326 case DataLayout::NCHW:
327 default:
328 inputShape = { 3, 2, 5, 5 };
329 outputShape = { 3, 2, 2, 4 };
330 }
telsoa014fcda012018-03-09 14:13:49 +0000331
telsoa01c577f2c2018-08-31 09:22:23 +0100332 // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
James Conroy69482272018-10-19 10:41:35 +0100333 CheckInputOutput(std::move(workload),
334 TensorInfo(inputShape, DataType),
335 TensorInfo(outputShape, DataType));
telsoa014fcda012018-03-09 14:13:49 +0000336}
337
338BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
339{
James Conroy69482272018-10-19 10:41:35 +0100340 RefCreatePooling2dWorkloadTest<RefPooling2dFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
341}
342
343BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
344{
345 RefCreatePooling2dWorkloadTest<RefPooling2dFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +0000346}
347
348BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
349{
James Conroy69482272018-10-19 10:41:35 +0100350 RefCreatePooling2dWorkloadTest<RefPooling2dUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
351}
352
353BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
354{
355 RefCreatePooling2dWorkloadTest<RefPooling2dUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +0000356}
357
telsoa01c577f2c2018-08-31 09:22:23 +0100358template <typename SoftmaxWorkloadType, armnn::DataType DataType>
telsoa014fcda012018-03-09 14:13:49 +0000359static void RefCreateSoftmaxWorkloadTest()
360{
361 Graph graph;
362 RefWorkloadFactory factory;
telsoa01c577f2c2018-08-31 09:22:23 +0100363 auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000364
telsoa01c577f2c2018-08-31 09:22:23 +0100365 // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000366 CheckInputOutput(
367 std::move(workload),
telsoa01c577f2c2018-08-31 09:22:23 +0100368 TensorInfo({4, 1}, DataType),
369 TensorInfo({4, 1}, DataType));
telsoa014fcda012018-03-09 14:13:49 +0000370}
371
372BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
373{
telsoa01c577f2c2018-08-31 09:22:23 +0100374 RefCreateSoftmaxWorkloadTest<RefSoftmaxFloat32Workload, armnn::DataType::Float32>();
telsoa014fcda012018-03-09 14:13:49 +0000375}
376
377BOOST_AUTO_TEST_CASE(CreateSoftmaxUint8Workload)
378{
telsoa01c577f2c2018-08-31 09:22:23 +0100379 RefCreateSoftmaxWorkloadTest<RefSoftmaxUint8Workload, armnn::DataType::QuantisedAsymm8>();
telsoa014fcda012018-03-09 14:13:49 +0000380}
381
template <typename SplitterWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);

    // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));

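    // The { 5, 7, 7 } input is split along the first dimension into views of 1, 2 and 2.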
    auto outputHandle0 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));

    auto outputHandle1 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));

    auto outputHandle2 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[2]);
    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, typename MergerWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterMergerWorkloadTest()
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the merger layer.
    // We test that it is possible to specify the 0th output of the splitter to be the 1st input to the merger,
    // and the 1st output of the splitter to be the 0th input of the merger.

    Graph graph;
    RefWorkloadFactory factory;
    auto workloads = CreateSplitterMergerWorkloadTest<SplitterWorkloadType, MergerWorkloadType, DataType>
        (factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlMerger = std::move(workloads.second);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* mIn0 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* mIn1 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[1]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat32)
{
    RefCreateSplitterMergerWorkloadTest<RefSplitterFloat32Workload, RefMergerFloat32Workload, DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerUint8)
{
    RefCreateSplitterMergerWorkloadTest<RefSplitterUint8Workload, RefMergerUint8Workload, DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateSingleOutputMultipleInputsTest()
{
    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We create a splitter with two outputs, and each of those outputs is used by two different activation layers.

    Graph graph;
    RefWorkloadFactory factory;
    std::unique_ptr<SplitterWorkloadType> wlSplitter;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
        ActivationWorkloadType, DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);

    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* activ0_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ0_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterFloat32Workload, RefActivationFloat32Workload,
        armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterUint8Workload, RefActivationUint8Workload,
        armnn::DataType::QuantisedAsymm8>();
}

template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
static void RefCreateResizeBilinearTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 2, 4, 4, 3 };
            outputShape = { 2, 2, 2, 3 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 2, 3, 4, 4 };
            outputShape = { 2, 3, 2, 2 };
            break;
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo(inputShape, DataType),
                     TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
{
    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
    RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
{
    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}

template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateL2NormalizationTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload =
        CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 5, 50, 67, 20 };
            outputShape = { 5, 50, 67, 20 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 5, 20, 50, 67 };
            outputShape = { 5, 20, 50, 67 };
            break;
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
    CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
{
    RefCreateL2NormalizationTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}

template <typename ReshapeWorkloadType, armnn::DataType DataType>
static void RefCreateReshapeWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({ 4, 1 }, DataType),
        TensorInfo({ 1, 4 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload)
{
    RefCreateReshapeWorkloadTest<RefReshapeFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
    RefCreateReshapeWorkloadTest<RefReshapeUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_SUITE_END()