//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/test/CreateWorkload.hpp>

#include <backends/CpuTensorHandle.hpp>
#include <backends/reference/RefWorkloadFactory.hpp>
#include <backends/reference/workloads/RefWorkloads.hpp>

namespace
{

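// Helpers that check whether the input and output tensor handles of a newly created workload
// hold the TensorInfo values we expect. The second overload handles workloads with two inputs.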
template<typename Workload>
void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo)
{
    auto queueDescriptor = workload->GetData();
    auto inputHandle  = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
}

template <typename Workload>
void CheckInputsOutput(std::unique_ptr<Workload> workload,
                       const TensorInfo& inputInfo0,
                       const TensorInfo& inputInfo1,
                       const TensorInfo& outputInfo)
{
    auto queueDescriptor = workload->GetData();
    auto inputHandle0 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto inputHandle1 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[1]);
    auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
    BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
}
}

BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)

template <typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateActivationWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);

    // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo({ 1, 1 }, DataType),
                     TensorInfo({ 1, 1 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
{
    RefCreateActivationWorkloadTest<RefActivationFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
{
    RefCreateActivationWorkloadTest<RefActivationUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename WorkloadType,
          typename DescriptorType,
          typename LayerType,
          armnn::DataType DataType>
static void RefCreateArithmeticWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateArithmeticWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);

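    // Checks that inputs and outputs are as we expect them (see definition of CreateArithmeticWorkloadTest).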
    CheckInputsOutput(std::move(workload),
                      TensorInfo({ 2, 3 }, DataType),
                      TensorInfo({ 2, 3 }, DataType),
                      TensorInfo({ 2, 3 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
    RefCreateArithmeticWorkloadTest<RefAdditionFloat32Workload,
                                    AdditionQueueDescriptor,
                                    AdditionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
{
    RefCreateArithmeticWorkloadTest<RefAdditionUint8Workload,
                                    AdditionQueueDescriptor,
                                    AdditionLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
{
    RefCreateArithmeticWorkloadTest<RefSubtractionFloat32Workload,
                                    SubtractionQueueDescriptor,
                                    SubtractionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
{
    RefCreateArithmeticWorkloadTest<RefSubtractionUint8Workload,
                                    SubtractionQueueDescriptor,
                                    SubtractionLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
{
    RefCreateArithmeticWorkloadTest<RefMultiplicationFloat32Workload,
                                    MultiplicationQueueDescriptor,
                                    MultiplicationLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
{
    RefCreateArithmeticWorkloadTest<RefMultiplicationUint8Workload,
                                    MultiplicationQueueDescriptor,
                                    MultiplicationLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload)
{
    RefCreateArithmeticWorkloadTest<RefDivisionFloat32Workload,
                                    DivisionQueueDescriptor,
                                    DivisionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
{
    RefCreateArithmeticWorkloadTest<RefDivisionUint8Workload,
                                    DivisionQueueDescriptor,
                                    DivisionLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWorkload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateBatchNormalizationWorkloadTest<RefBatchNormalizationFloat32Workload, armnn::DataType::Float32>
                    (factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
    CheckInputOutput(
        std::move(workload), TensorInfo({2, 3, 1, 1}, DataType::Float32), TensorInfo({2, 3, 1, 1}, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16));
}

static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dFloat32Workload, DataType::Float32>
                    (factory, graph, dataLayout);

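    // Expected shapes depend on the requested data layout: NCHW is ordered { N, C, H, W }, NHWC is { N, H, W, C }.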
    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 3, 8, 16}) : std::initializer_list<unsigned int>({2, 8, 16, 3});
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 2, 2, 10}) : std::initializer_list<unsigned int>({2, 2, 10, 2});

    // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo(inputShape, DataType::Float32),
                     TensorInfo(outputShape, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
}

template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
static void RefCreateFullyConnectedWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
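    // The quantization scales below only matter for the QuantisedAsymm8 variant; the float path uses a scale of 0.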
    float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
    float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
    CheckInputOutput(std::move(workload),
                     TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
                     TensorInfo({ 3, 7 }, DataType, outputQScale));
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat32Workload)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedUint8Workload)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename NormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

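    // Expected shapes come from CreateNormalizationWorkloadTest and depend on the requested data layout.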
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 3, 1, 5, 5 };
            outputShape = { 3, 1, 5, 5 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 3, 5, 5, 1 };
            outputShape = { 3, 5, 5, 1 };
            break;
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
    CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationNchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationNhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}

template <typename Pooling2dWorkloadType, armnn::DataType DataType>
static void RefCreatePooling2dWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({3, 2, 5, 5}, DataType),
        TensorInfo({3, 2, 2, 4}, DataType));
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename SoftmaxWorkloadType, armnn::DataType DataType>
static void RefCreateSoftmaxWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({4, 1}, DataType),
        TensorInfo({4, 1}, DataType));
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxUint8Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);

    // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));

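    // The { 5, 7, 7 } input is split along its first dimension into views of size 1, 2 and 2.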
    auto outputHandle0 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));

    auto outputHandle1 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));

    auto outputHandle2 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[2]);
    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, typename MergerWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterMergerWorkloadTest()
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the merger layer.
    // We test that it is possible to specify the 0th output of the splitter to be the 1st input to the merger,
    // and the 1st output of the splitter to be the 0th input of the merger.

    Graph graph;
    RefWorkloadFactory factory;
    auto workloads = CreateSplitterMergerWorkloadTest<SplitterWorkloadType, MergerWorkloadType, DataType>
                     (factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlMerger = std::move(workloads.second);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* mIn0 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* mIn1 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[1]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat32)
{
    RefCreateSplitterMergerWorkloadTest<RefSplitterFloat32Workload, RefMergerFloat32Workload, DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerUint8)
{
    RefCreateSplitterMergerWorkloadTest<RefSplitterUint8Workload, RefMergerUint8Workload, DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateSingleOutputMultipleInputsTest()
{
    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We create a splitter with two outputs, and each of those outputs is used by two different activation layers.

    Graph graph;
    RefWorkloadFactory factory;
    std::unique_ptr<SplitterWorkloadType> wlSplitter;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
        ActivationWorkloadType, DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);

    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* activ0_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ0_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

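    // Each splitter output buffer should be shared by both activation workloads that consume it.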
    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterFloat32Workload, RefActivationFloat32Workload,
        armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterUint8Workload, RefActivationUint8Workload,
        armnn::DataType::QuantisedAsymm8>();
}

template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
static void RefCreateResizeBilinearTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

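    // Expected shapes come from CreateResizeBilinearWorkloadTest: the 4x4 spatial dimensions are resized down to 2x2.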
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 2, 4, 4, 3 };
            outputShape = { 2, 2, 2, 3 };
            break;
        default: // NCHW
            inputShape  = { 2, 3, 4, 4 };
            outputShape = { 2, 3, 2, 2 };
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo(inputShape, DataType),
        TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
{
    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
    RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
{
    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}

template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateL2NormalizationTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload =
        CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

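    // L2 normalization keeps the tensor shape unchanged; only the dimension ordering differs between NCHW and NHWC.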
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 5, 50, 67, 20 };
            outputShape = { 5, 50, 67, 20 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 5, 20, 50, 67 };
            outputShape = { 5, 20, 50, 67 };
            break;
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
    CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
{
    RefCreateL2NormalizationTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}

template <typename ReshapeWorkloadType, armnn::DataType DataType>
static void RefCreateReshapeWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({ 4, 1 }, DataType),
        TensorInfo({ 1, 4 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload)
{
    RefCreateReshapeWorkloadTest<RefReshapeFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
    RefCreateReshapeWorkloadTest<RefReshapeUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_SUITE_END()