//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClContextControlFixture.hpp"

#include <backends/MemCopyWorkload.hpp>
#include <backends/cl/ClTensorHandle.hpp>
#include <backends/cl/ClWorkloadFactory.hpp>
#include <backends/cl/workloads/ClWorkloads.hpp>
#include <backends/cl/workloads/ClWorkloadUtils.hpp>
#include <backends/reference/RefWorkloadFactory.hpp>

#include <test/CreateWorkloadClNeon.hpp>

boost::test_tools::predicate_result CompareIClTensorHandleShape(IClTensorHandle* tensorHandle,
                                                                std::initializer_list<unsigned int> expectedDimensions)
{
    return CompareTensorHandleShape<IClTensorHandle>(tensorHandle, expectedDimensions);
}

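// Every test case in this suite uses ClContextControlFixture, so an OpenCL context is
// set up before each test case runs and torn down again afterwards.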
BOOST_FIXTURE_TEST_SUITE(CreateWorkloadCl, ClContextControlFixture)

template <armnn::DataType DataType>
static void ClCreateActivationWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateActivationWorkloadTest<ClActivationWorkload, DataType>(factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest).
    ActivationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1}));
}

BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
{
    ClCreateActivationWorkloadTest<armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
{
    ClCreateActivationWorkloadTest<armnn::DataType::Float16>();
}

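// Shared helper for the arithmetic (Addition/Subtraction/Multiplication/Division) tests below,
// parameterised on the CL workload type, its queue descriptor, the layer type and the data type.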
template <typename WorkloadType,
          typename DescriptorType,
          typename LayerType,
          armnn::DataType DataType>
static void ClCreateArithmethicWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateArithmeticWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of CreateArithmeticWorkloadTest).
    DescriptorType queueDescriptor = workload->GetData();
    auto inputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto inputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
    ClCreateArithmethicWorkloadTest<ClAdditionWorkload,
                                    AdditionQueueDescriptor,
                                    AdditionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
{
    ClCreateArithmethicWorkloadTest<ClAdditionWorkload,
                                    AdditionQueueDescriptor,
                                    AdditionLayer,
                                    armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
{
    ClCreateArithmethicWorkloadTest<ClSubtractionWorkload,
                                    SubtractionQueueDescriptor,
                                    SubtractionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
{
    ClCreateArithmethicWorkloadTest<ClSubtractionWorkload,
                                    SubtractionQueueDescriptor,
                                    SubtractionLayer,
                                    armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkloadTest)
{
    ClCreateArithmethicWorkloadTest<ClMultiplicationWorkload,
                                    MultiplicationQueueDescriptor,
                                    MultiplicationLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16WorkloadTest)
{
    ClCreateArithmethicWorkloadTest<ClMultiplicationWorkload,
                                    MultiplicationQueueDescriptor,
                                    MultiplicationLayer,
                                    armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8WorkloadTest)
{
    ClCreateArithmethicWorkloadTest<ClMultiplicationWorkload,
                                    MultiplicationQueueDescriptor,
                                    MultiplicationLayer,
                                    armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
{
    ClCreateArithmethicWorkloadTest<ClDivisionFloatWorkload,
                                    DivisionQueueDescriptor,
                                    DivisionLayer,
                                    armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloat16WorkloadTest)
{
    ClCreateArithmethicWorkloadTest<ClDivisionFloatWorkload,
                                    DivisionQueueDescriptor,
                                    DivisionLayer,
                                    armnn::DataType::Float16>();
}

template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
static void ClCreateBatchNormalizationWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>
                    (factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
    BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 1, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3, 1, 1}));
}

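// Note: ClBatchNormalizationFloatWorkload is instantiated for both the Float32 and Float16 cases
// below; the "Float" CL workloads in this file cover both floating-point data types.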
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatWorkload)
{
    ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
{
    ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateConvertFp16ToFp32WorkloadTest<ClConvertFp16ToFp32Workload>(factory, graph);

    ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3}));
    BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
    BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateConvertFp32ToFp16WorkloadTest<ClConvertFp32ToFp16Workload>(factory, graph);

    ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3}));
    BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
    BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
}

template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
static void ClConvolution2dWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateConvolution2dWorkloadTest<ClConvolution2dWorkload, DataType>(factory,
                                                                                       graph,
                                                                                       dataLayout);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 3, 8, 16}) : std::initializer_list<unsigned int>({2, 8, 16, 3});
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 2, 2, 10}) : std::initializer_list<unsigned int>({2, 2, 10, 2});

    // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

template <typename DepthwiseConvolutionWorkloadType, typename armnn::DataType DataType>
static void ClDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateDepthwiseConvolution2dWorkloadTest<DepthwiseConvolutionWorkloadType, DataType>
                    (factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest).
    DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW)
        ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
        : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW)
        ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
        : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
{
    ClDepthwiseConvolutionWorkloadTest<ClDepthwiseConvolutionWorkload, DataType::Float32>(DataLayout::NHWC);
}
275
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100276template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
telsoa01c577f2c2018-08-31 09:22:23 +0100277static void ClDirectConvolution2dWorkloadTest()
telsoa014fcda012018-03-09 14:13:49 +0000278{
telsoa01c577f2c2018-08-31 09:22:23 +0100279 Graph graph;
280 ClWorkloadFactory factory;
Matthew Benthamd8067922018-10-03 17:18:04 +0100281 auto workload = CreateDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000282
telsoa01c577f2c2018-08-31 09:22:23 +0100283 // Checks that outputs and inputs are as we expect them (see definition of CreateDirectConvolution2dWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000284 Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
285 auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
286 auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
287 BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6}));
288 BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6}));
289}
290
arovir019e53a352018-08-31 15:26:35 +0100291BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload)
telsoa014fcda012018-03-09 14:13:49 +0000292{
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100293 ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>();
telsoa01c577f2c2018-08-31 09:22:23 +0100294}
295
296BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload)
297{
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100298 ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>();
telsoa014fcda012018-03-09 14:13:49 +0000299}
300
301BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload)
302{
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100303 ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QuantisedAsymm8>();
telsoa014fcda012018-03-09 14:13:49 +0000304}

template <typename FullyConnectedWorkloadType, typename armnn::DataType DataType>
static void ClCreateFullyConnectedWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload =
        CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
    FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7}));
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkloadTest)
{
    ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16WorkloadTest)
{
    ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float16>();
}

template <typename NormalizationWorkloadType, typename armnn::DataType DataType>
static void ClNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>
                    (factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
    NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 5, 5, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 5, 5, 1}));
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NchwWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NhwcWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

template <typename armnn::DataType DataType>
static void ClPooling2dWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreatePooling2dWorkloadTest<ClPooling2dWorkload, DataType>(factory, graph, dataLayout);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({3, 2, 5, 5}) : std::initializer_list<unsigned int>({3, 5, 5, 2});
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({3, 2, 2, 4}) : std::initializer_list<unsigned int>({3, 2, 4, 2});

    // Check that inputs/outputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
    Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload)
{
    ClPooling2dWorkloadTest<armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
{
    ClPooling2dWorkloadTest<armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NchwWorkload)
{
    ClPooling2dWorkloadTest<armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NhwcWorkload)
{
    ClPooling2dWorkloadTest<armnn::DataType::Float16>(DataLayout::NHWC);
}

template <typename armnn::DataType DataType>
static void ClCreateReshapeWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateReshapeWorkloadTest<ClReshapeWorkload, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
    ReshapeQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4})); // Leading size 1 dimensions are collapsed by ACL.
}

BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
{
    ClCreateReshapeWorkloadTest<armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
{
    ClCreateReshapeWorkloadTest<armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
    ClCreateReshapeWorkloadTest<armnn::DataType::QuantisedAsymm8>();
}

template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
static void ClSoftmaxWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of ClSoftmaxFloatWorkload).
    SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1}));
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkloadTest)
{
    ClSoftmaxWorkloadTest<ClSoftmaxFloatWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16WorkloadTest)
{
    ClSoftmaxWorkloadTest<ClSoftmaxFloatWorkload, armnn::DataType::Float16>();
}

template <typename armnn::DataType DataType>
static void ClSplitterWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateSplitterWorkloadTest<ClSplitterWorkload, DataType>(factory, graph);

    // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {5, 7, 7}));

    auto outputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {2, 7, 7}));

    auto outputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]);
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2, 7, 7}));

    auto outputHandle0 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    // NOTE: At the moment CL collapses the tensor to 2 dimensions when dimension zero == 1;
    // we are raising this difference between the NEON and CL libs as an issue with the Compute Library team.
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {7, 7}));
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload)
{
    ClSplitterWorkloadTest<armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
{
    ClSplitterWorkloadTest<armnn::DataType::Float16>();
}

template <typename armnn::DataType DataType>
static void ClSplitterMergerTest()
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the merger layer.
    // We test that it is possible to specify the 0th output of the splitter to be the 1st input
    // to the merger, and the 1st output of the splitter to be the 0th input of the merger.

    Graph graph;
    ClWorkloadFactory factory;

    auto workloads =
        CreateSplitterMergerWorkloadTest<ClSplitterWorkload, ClMergerWorkload, DataType>
            (factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlMerger = std::move(workloads.second);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::ClSubTensorHandle* mIn0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* mIn1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[1]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

    // Flipped order of inputs/outputs.
    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
    BOOST_TEST(validDataPointers);

    // Also make sure that the inputs are sub-tensors of one tensor and the outputs are sub-tensors of another tensor.
    bool validSubTensorParents = (mIn0->GetTensor().parent() == mIn1->GetTensor().parent())
                                 && (sOut0->GetTensor().parent() == sOut1->GetTensor().parent());

    BOOST_TEST(validSubTensorParents);
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloatWorkload)
{
    ClSplitterMergerTest<armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat16Workload)
{
    ClSplitterMergerTest<armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
{
    // Test that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We create a splitter with two outputs and check that each of those outputs is used by
    // two different activation layers.

    Graph graph;
    ClWorkloadFactory factory;
    std::unique_ptr<ClSplitterWorkload> wlSplitter;
    std::unique_ptr<ClActivationWorkload> wlActiv0_0;
    std::unique_ptr<ClActivationWorkload> wlActiv0_1;
    std::unique_ptr<ClActivationWorkload> wlActiv1_0;
    std::unique_ptr<ClActivationWorkload> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<ClSplitterWorkload,
        ClActivationWorkload, armnn::DataType::Float32>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1,
        wlActiv1_0, wlActiv1_1);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::ClSubTensorHandle* activ0_0Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* activ0_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* activ1_0Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* activ1_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}

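// Exercises the MemCopy workloads used to move tensor data between CPU and GPU memory
// (helper presumably provided by the CreateWorkloadClNeon.hpp test header included above).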
BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl)
{
    ClWorkloadFactory factory;
    CreateMemCopyWorkloads<IClTensorHandle>(factory);
}

template <typename L2NormalizationWorkloadType, typename armnn::DataType DataType>
static void ClL2NormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload =
        CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
    L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW)
        ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
        : std::initializer_list<unsigned int>({ 5, 50, 67, 20 });
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW)
        ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
        : std::initializer_list<unsigned int>({ 5, 50, 67, 20 });

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNchwWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNhwcWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

template <typename LstmWorkloadType>
static void ClCreateLstmWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateLstmWorkloadTest<LstmWorkloadType>(factory, graph);

    LstmQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 2 }));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 }));
}

BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
{
    ClCreateLstmWorkloadTest<ClLstmFloatWorkload>();
}

template <typename ResizeBilinearWorkloadType, typename armnn::DataType DataType>
static void ClResizeBilinearWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
    ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
            break;
        default: // NCHW
            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
    }
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NchwWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NchwWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NhwcWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NhwcWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_SUITE_END()