//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/test/CreateWorkload.hpp>

#include <backends/CpuTensorHandle.hpp>
#include <backends/reference/RefWorkloadFactory.hpp>
#include <backends/reference/workloads/RefWorkloads.hpp>

namespace
{

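// Checks that the workload's single input and single output tensor handles hold the TensorInfos we expect.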
template<typename Workload>
void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo)
{
    auto queueDescriptor = workload->GetData();
    auto inputHandle  = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
}

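// Checks that the workload's two input handles and single output handle hold the TensorInfos we expect.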
template <typename Workload>
void CheckInputsOutput(std::unique_ptr<Workload> workload,
                       const TensorInfo& inputInfo0,
                       const TensorInfo& inputInfo1,
                       const TensorInfo& outputInfo)
{
    auto queueDescriptor = workload->GetData();
    auto inputHandle0 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto inputHandle1 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[1]);
    auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
    BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
}
}

BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)

template <typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateActivationWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);

    // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo({ 1, 1 }, DataType),
                     TensorInfo({ 1, 1 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
{
    RefCreateActivationWorkloadTest<RefActivationFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
{
    RefCreateActivationWorkloadTest<RefActivationUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename WorkloadType,
          typename DescriptorType,
          typename LayerType,
          armnn::DataType DataType>
static void RefCreateArithmeticWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateArithmeticWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);

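    // Checks that inputs/outputs are as we expect them (see definition of CreateArithmeticWorkloadTest).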
    CheckInputsOutput(std::move(workload),
                      TensorInfo({ 2, 3 }, DataType),
                      TensorInfo({ 2, 3 }, DataType),
                      TensorInfo({ 2, 3 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
    RefCreateArithmeticWorkloadTest<RefAdditionFloat32Workload,
                                    AdditionQueueDescriptor,
                                    AdditionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
{
    RefCreateArithmeticWorkloadTest<RefAdditionUint8Workload,
                                    AdditionQueueDescriptor,
                                    AdditionLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
{
    RefCreateArithmeticWorkloadTest<RefSubtractionFloat32Workload,
                                    SubtractionQueueDescriptor,
                                    SubtractionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
{
    RefCreateArithmeticWorkloadTest<RefSubtractionUint8Workload,
                                    SubtractionQueueDescriptor,
                                    SubtractionLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
{
    RefCreateArithmeticWorkloadTest<RefMultiplicationFloat32Workload,
                                    MultiplicationQueueDescriptor,
                                    MultiplicationLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
{
    RefCreateArithmeticWorkloadTest<RefMultiplicationUint8Workload,
                                    MultiplicationQueueDescriptor,
                                    MultiplicationLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload)
{
    RefCreateArithmeticWorkloadTest<RefDivisionFloat32Workload,
                                    DivisionQueueDescriptor,
                                    DivisionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
{
    RefCreateArithmeticWorkloadTest<RefDivisionUint8Workload,
                                    DivisionQueueDescriptor,
                                    DivisionLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWorkload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateBatchNormalizationWorkloadTest<RefBatchNormalizationFloat32Workload, armnn::DataType::Float32>
                    (factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
    CheckInputOutput(
        std::move(workload), TensorInfo({2, 3, 1, 1}, DataType::Float32), TensorInfo({2, 3, 1, 1}, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateConvertFp16ToFp32WorkloadTest).
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateConvertFp32ToFp16WorkloadTest).
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16));
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dWorkload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dFloat32Workload,
                                                    DataType::Float32>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo({2, 3, 8, 16}, DataType::Float32),
                     TensorInfo({2, 2, 2, 10}, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolution2dWorkload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload =
        CreateDepthwiseConvolution2dWorkloadTest<RefDepthwiseConvolution2dFloat32Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo({2, 3, 8, 16}, DataType::Float32),
                     TensorInfo({2, 9, 2, 10}, DataType::Float32));
}

template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
static void RefCreateFullyConnectedWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
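    // A non-zero quantisation scale is only expected for the quantised data type.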
    float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0f;
    float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0f;
    CheckInputOutput(std::move(workload),
                     TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
                     TensorInfo({ 3, 7 }, DataType, outputQScale));
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat32Workload)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedUint8Workload)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename NormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateNormalizationWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo({3, 5, 5, 1}, DataType),
                     TensorInfo({3, 5, 5, 1}, DataType));
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationNchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>();
}

template <typename Pooling2dWorkloadType, armnn::DataType DataType>
static void RefCreatePooling2dWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({3, 2, 5, 5}, DataType),
        TensorInfo({3, 2, 2, 4}, DataType));
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename SoftmaxWorkloadType, armnn::DataType DataType>
static void RefCreateSoftmaxWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({4, 1}, DataType),
        TensorInfo({4, 1}, DataType));
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxUint8Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);

    // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
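    // The { 5, 7, 7 } input is split along the first dimension into outputs of size 1, 2 and 2.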
    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));

    auto outputHandle0 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));

    auto outputHandle1 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));

    auto outputHandle2 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[2]);
    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, typename MergerWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterMergerWorkloadTest()
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the merger layer.
    // We test that it is possible to specify the 0th output of the splitter
    // to be the 1st input to the merger, and the 1st output of the splitter
    // to be the 0th input of the merger.

    Graph graph;
    RefWorkloadFactory factory;
    auto workloads = CreateSplitterMergerWorkloadTest<SplitterWorkloadType, MergerWorkloadType, DataType>
        (factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlMerger = std::move(workloads.second);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* mIn0 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* mIn1 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[1]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

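    // The 0th splitter output must feed the 1st merger input, and the 1st splitter output the 0th merger input.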
    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat32)
{
    RefCreateSplitterMergerWorkloadTest<RefSplitterFloat32Workload, RefMergerFloat32Workload, DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerUint8)
{
    RefCreateSplitterMergerWorkloadTest<RefSplitterUint8Workload, RefMergerUint8Workload, DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateSingleOutputMultipleInputsTest()
{
    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We create a splitter with two outputs and check that each of those outputs is used by two different
    // activation layers.

    Graph graph;
    RefWorkloadFactory factory;
    std::unique_ptr<SplitterWorkloadType> wlSplitter;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
        ActivationWorkloadType, DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);

    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* activ0_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ0_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

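    // Each splitter output must be shared by the two activation workloads that consume it.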
    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterFloat32Workload, RefActivationFloat32Workload,
        armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterUint8Workload, RefActivationUint8Workload,
        armnn::DataType::QuantisedAsymm8>();
}

template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
static void RefCreateResizeBilinearTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

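    // The spatial dimensions are downscaled from 4x4 to 2x2; the shape ordering depends on the data layout.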
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 2, 4, 4, 3 };
            outputShape = { 2, 2, 2, 3 };
            break;
        default: // NCHW
            inputShape  = { 2, 3, 4, 4 };
            outputShape = { 2, 3, 2, 2 };
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo(inputShape, DataType),
        TensorInfo(outputShape, DataType));
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
{
    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
    RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
{
    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateL2NormalizationWorkloadTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>
        (factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({ 5, 20, 50, 67 }, armnn::DataType::Float32),
        TensorInfo({ 5, 20, 50, 67 }, armnn::DataType::Float32));
}

template <typename ReshapeWorkloadType, armnn::DataType DataType>
static void RefCreateReshapeWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({ 4, 1 }, DataType),
        TensorInfo({ 1, 4 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload)
{
    RefCreateReshapeWorkloadTest<RefReshapeFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
    RefCreateReshapeWorkloadTest<RefReshapeUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_SUITE_END()