//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClContextControlFixture.hpp"

#include <backends/MemCopyWorkload.hpp>
#include <backends/cl/ClTensorHandle.hpp>
#include <backends/cl/ClWorkloadFactory.hpp>
#include <backends/cl/workloads/ClWorkloads.hpp>
#include <backends/cl/workloads/ClWorkloadUtils.hpp>
#include <backends/reference/RefWorkloadFactory.hpp>

#include <test/CreateWorkloadClNeon.hpp>

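// Convenience wrapper around the generic CompareTensorHandleShape check for IClTensorHandle.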
boost::test_tools::predicate_result CompareIClTensorHandleShape(IClTensorHandle* tensorHandle,
                                                                std::initializer_list<unsigned int> expectedDimensions)
{
    return CompareTensorHandleShape<IClTensorHandle>(tensorHandle, expectedDimensions);
}

BOOST_FIXTURE_TEST_SUITE(CreateWorkloadCl, ClContextControlFixture)

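// Builds a graph containing a single activation layer through the CL workload factory
// and verifies the shapes of the resulting input/output tensor handles.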
template <typename ActivationWorkloadType, armnn::DataType DataType>
static void ClCreateActivationWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest).
    ActivationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1}));
}

BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
{
    ClCreateActivationWorkloadTest<ClActivationFloatWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
{
    ClCreateActivationWorkloadTest<ClActivationFloatWorkload, armnn::DataType::Float16>();
}

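// Shared helper for the two-input arithmetic workloads (addition, subtraction,
// multiplication and division); both inputs and the output are expected to have shape {2, 3}.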
template <typename WorkloadType,
          typename DescriptorType,
          typename LayerType,
          armnn::DataType DataType>
static void ClCreateArithmethicWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateArithmeticWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of CreateArithmeticWorkloadTest).
    DescriptorType queueDescriptor = workload->GetData();
    auto inputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto inputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
    ClCreateArithmethicWorkloadTest<ClAdditionWorkload,
                                    AdditionQueueDescriptor,
                                    AdditionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
{
    ClCreateArithmethicWorkloadTest<ClAdditionWorkload,
                                    AdditionQueueDescriptor,
                                    AdditionLayer,
                                    armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
{
    ClCreateArithmethicWorkloadTest<ClSubtractionWorkload,
                                    SubtractionQueueDescriptor,
                                    SubtractionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
{
    ClCreateArithmethicWorkloadTest<ClSubtractionWorkload,
                                    SubtractionQueueDescriptor,
                                    SubtractionLayer,
                                    armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkloadTest)
{
    ClCreateArithmethicWorkloadTest<ClMultiplicationWorkload,
                                    MultiplicationQueueDescriptor,
                                    MultiplicationLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16WorkloadTest)
{
    ClCreateArithmethicWorkloadTest<ClMultiplicationWorkload,
                                    MultiplicationQueueDescriptor,
                                    MultiplicationLayer,
                                    armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8WorkloadTest)
{
    ClCreateArithmethicWorkloadTest<ClMultiplicationWorkload,
                                    MultiplicationQueueDescriptor,
                                    MultiplicationLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
{
    ClCreateArithmethicWorkloadTest<ClDivisionFloatWorkload,
                                    DivisionQueueDescriptor,
                                    DivisionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloat16WorkloadTest)
{
    ClCreateArithmethicWorkloadTest<ClDivisionFloatWorkload,
                                    DivisionQueueDescriptor,
                                    DivisionLayer,
                                    armnn::DataType::Float16>();
}

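// Builds a single batch normalization layer through the CL workload factory and checks its tensor handles.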
template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
static void ClCreateBatchNormalizationWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>
                    (factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
    BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 1, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3, 1, 1}));
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatWorkload)
{
    ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
{
    ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateConvertFp16ToFp32WorkloadTest<ClConvertFp16ToFp32Workload>(factory, graph);

    ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3}));
    BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
    BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateConvertFp32ToFp16WorkloadTest<ClConvertFp32ToFp16Workload>(factory, graph);

    ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3}));
    BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
    BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
}

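// Exercises Convolution2d workload creation for both NCHW and NHWC data layouts;
// the expected tensor shapes are chosen to match the requested layout.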
template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
static void ClConvolution2dWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateConvolution2dWorkloadTest<ClConvolution2dWorkload, DataType>(factory,
                                                                                       graph,
                                                                                       dataLayout);

    std::initializer_list<unsigned int> inputShape  = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 3, 8, 16}) : std::initializer_list<unsigned int>({2, 8, 16, 3});
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 2, 2, 10}) : std::initializer_list<unsigned int>({2, 2, 10, 2});

    // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

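// Helper for the direct Convolution2d workload creation tests (see CreateDirectConvolution2dWorkloadTest).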
template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
static void ClDirectConvolution2dWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateDirectConvolution2dWorkloadTest).
    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6}));
}

BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload)
{
    ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload)
{
    ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload)
{
    ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QuantisedAsymm8>();
}

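// Builds a single fully connected layer through the CL workload factory and checks its tensor handles.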
template <typename FullyConnectedWorkloadType, typename armnn::DataType DataType>
static void ClCreateFullyConnectedWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload =
        CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
    FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7}));
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkloadTest)
{
    ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16WorkloadTest)
{
    ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float16>();
}

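// Builds a normalization layer for the given data layout and checks its tensor handles.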
template <typename NormalizationWorkloadType, typename armnn::DataType DataType>
static void ClNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>
                    (factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
    NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 5, 5, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 5, 5, 1}));
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NchwWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NhwcWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

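// Exercises Pooling2d workload creation for both NCHW and NHWC data layouts.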
341template <typename Pooling2dWorkloadType, typename armnn::DataType DataType>
Nina Drozdb48e6862018-10-09 12:09:56 +0100342static void ClPooling2dWorkloadTest(DataLayout dataLayout)
telsoa01c577f2c2018-08-31 09:22:23 +0100343{
344 Graph graph;
telsoa014fcda012018-03-09 14:13:49 +0000345 ClWorkloadFactory factory;
telsoa014fcda012018-03-09 14:13:49 +0000346
Nina Drozdb48e6862018-10-09 12:09:56 +0100347 auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph, dataLayout);
348
349 std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW) ?
350 std::initializer_list<unsigned int>({3, 2, 5, 5}) : std::initializer_list<unsigned int>({3, 5, 5, 2});
351 std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
352 std::initializer_list<unsigned int>({3, 2, 2, 4}) : std::initializer_list<unsigned int>({3, 2, 4, 2});
telsoa014fcda012018-03-09 14:13:49 +0000353
telsoa01c577f2c2018-08-31 09:22:23 +0100354 // Check that inputs/outputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000355 Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
356 auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
357 auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
358
Nina Drozdb48e6862018-10-09 12:09:56 +0100359 BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
360 BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
telsoa014fcda012018-03-09 14:13:49 +0000361}
362
Nina Drozdb48e6862018-10-09 12:09:56 +0100363BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +0100364{
Nina Drozdb48e6862018-10-09 12:09:56 +0100365 ClPooling2dWorkloadTest<ClPooling2dFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
telsoa01c577f2c2018-08-31 09:22:23 +0100366}
367
Nina Drozdb48e6862018-10-09 12:09:56 +0100368BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +0100369{
Nina Drozdb48e6862018-10-09 12:09:56 +0100370 ClPooling2dWorkloadTest<ClPooling2dFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
371}
372
373BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NchwWorkload)
374{
375 ClPooling2dWorkloadTest<ClPooling2dFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
376}
377
378BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NhwcWorkload)
379{
380 ClPooling2dWorkloadTest<ClPooling2dFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
telsoa01c577f2c2018-08-31 09:22:23 +0100381}
382
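// Builds a single reshape layer through the CL workload factory and checks its tensor handles.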
template <typename ReshapeWorkloadType, typename armnn::DataType DataType>
static void ClCreateReshapeWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
    ReshapeQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4})); // Leading size 1 dimensions are collapsed by ACL.
}

BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
{
    ClCreateReshapeWorkloadTest<ClReshapeFloatWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
{
    ClCreateReshapeWorkloadTest<ClReshapeFloatWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
    ClCreateReshapeWorkloadTest<ClReshapeUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

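// Builds a single softmax layer through the CL workload factory and checks its tensor handles.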
template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
static void ClSoftmaxWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
    SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1}));
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkloadTest)
{
    ClSoftmaxWorkloadTest<ClSoftmaxFloatWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16WorkloadTest)
{
    ClSoftmaxWorkloadTest<ClSoftmaxFloatWorkload, armnn::DataType::Float16>();
}

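// Builds a splitter layer with three outputs and checks the shapes of all resulting tensor handles.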
template <typename armnn::DataType DataType>
static void ClSplitterWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateSplitterWorkloadTest<ClSplitterWorkload, DataType>(factory, graph);

    // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {5, 7, 7}));

    auto outputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {2, 7, 7}));

    auto outputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]);
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2, 7, 7}));

    auto outputHandle0 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    // NOTE: At the moment the CL backend collapses the tensor to 2 dimensions when dimension zero == 1;
    //       we are raising this difference between the NEON and CL libs as an issue with the Compute Library team.
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {7, 7}));
}
467
arovir019e53a352018-08-31 15:26:35 +0100468BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload)
telsoa014fcda012018-03-09 14:13:49 +0000469{
Matthew Bentham29cadb32018-10-01 17:22:32 +0100470 ClSplitterWorkloadTest<armnn::DataType::Float32>();
telsoa01c577f2c2018-08-31 09:22:23 +0100471}
472
473BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
474{
Matthew Bentham29cadb32018-10-01 17:22:32 +0100475 ClSplitterWorkloadTest<armnn::DataType::Float16>();
telsoa01c577f2c2018-08-31 09:22:23 +0100476}
477
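// Verifies that splitter outputs can be connected to merger inputs in a user-chosen (swapped) order.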
template <typename MergerWorkloadType, typename armnn::DataType DataType>
static void ClSplitterMergerTest()
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the merger layer.
    // We test that it is possible to specify the 0th output of the splitter to be the 1st input to the merger,
    // and the 1st output of the splitter to be the 0th input of the merger.

    Graph graph;
    ClWorkloadFactory factory;

    auto workloads =
        CreateSplitterMergerWorkloadTest<ClSplitterWorkload, MergerWorkloadType, DataType>
        (factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlMerger = std::move(workloads.second);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::ClSubTensorHandle* mIn0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* mIn1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[1]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

    // Flipped order of inputs/outputs.
    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
    BOOST_TEST(validDataPointers);

    // Also make sure that the inputs are sub-tensors of one tensor and the outputs are sub-tensors of another tensor.
    bool validSubTensorParents = (mIn0->GetTensor().parent() == mIn1->GetTensor().parent())
                                 && (sOut0->GetTensor().parent() == sOut1->GetTensor().parent());

    BOOST_TEST(validSubTensorParents);
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloatWorkload)
{
    ClSplitterMergerTest<ClMergerFloatWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat16Workload)
{
    ClSplitterMergerTest<ClMergerFloatWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
{
    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We create a splitter with two outputs and check that each of those outputs is used by two different
    // activation layers.

    Graph graph;
    ClWorkloadFactory factory;
    std::unique_ptr<ClSplitterWorkload> wlSplitter;
    std::unique_ptr<ClActivationFloatWorkload> wlActiv0_0;
    std::unique_ptr<ClActivationFloatWorkload> wlActiv0_1;
    std::unique_ptr<ClActivationFloatWorkload> wlActiv1_0;
    std::unique_ptr<ClActivationFloatWorkload> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<ClSplitterWorkload,
        ClActivationFloatWorkload, armnn::DataType::Float32>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1,
                                                             wlActiv1_0, wlActiv1_1);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::ClSubTensorHandle* activ0_0Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* activ0_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* activ1_0Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* activ1_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl)
{
    ClWorkloadFactory factory;
    CreateMemCopyWorkloads<IClTensorHandle>(factory);
}

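// Builds an L2 normalization layer for the given data layout and checks its tensor handles.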
template <typename L2NormalizationWorkloadType, typename armnn::DataType DataType>
static void ClL2NormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload =
        CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
    L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW)
        ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
        : std::initializer_list<unsigned int>({ 5, 50, 67, 20 });
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW)
        ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
        : std::initializer_list<unsigned int>({ 5, 50, 67, 20 });

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNchwWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNhwcWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

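// Builds a single LSTM layer through the CL workload factory and checks its input/output tensor handles.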
template <typename LstmWorkloadType>
static void ClCreateLstmWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateLstmWorkloadTest<LstmWorkloadType>(factory, graph);

    LstmQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 2 }));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 }));
}

BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
{
    ClCreateLstmWorkloadTest<ClLstmFloatWorkload>();
}

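// Builds a resize bilinear layer for the given data layout and checks its tensor handles.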
template <typename ResizeBilinearWorkloadType, typename armnn::DataType DataType>
static void ClResizeBilinearWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
    ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
            break;
        default: // NCHW
            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
    }
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NchwWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NchwWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NhwcWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NhwcWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_SUITE_END()