//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClContextControlFixture.hpp"

#include <backends/MemCopyWorkload.hpp>

#include <backends/aclCommon/test/CreateWorkloadClNeon.hpp>

#include <backends/cl/ClTensorHandle.hpp>
#include <backends/cl/ClWorkloadFactory.hpp>
#include <backends/cl/workloads/ClWorkloads.hpp>
#include <backends/cl/workloads/ClWorkloadUtils.hpp>

#include <backends/reference/RefWorkloadFactory.hpp>

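// Helper predicate: compares the shape of an IClTensorHandle against the expected dimensions,
// delegating to the generic CompareTensorHandleShape helper so that failures report as Boost predicate results.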
boost::test_tools::predicate_result CompareIClTensorHandleShape(IClTensorHandle* tensorHandle,
                                                                std::initializer_list<unsigned int> expectedDimensions)
{
    return CompareTensorHandleShape<IClTensorHandle>(tensorHandle, expectedDimensions);
}

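// All test cases in this suite run inside ClContextControlFixture, which is expected to set up and tear down
// the OpenCL context for the duration of the suite (see ClContextControlFixture.hpp).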
BOOST_FIXTURE_TEST_SUITE(CreateWorkloadCl, ClContextControlFixture)

template <armnn::DataType DataType>
static void ClCreateActivationWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateActivationWorkloadTest<ClActivationWorkload, DataType>(factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest).
    ActivationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1}));
}

BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
{
    ClCreateActivationWorkloadTest<armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
{
    ClCreateActivationWorkloadTest<armnn::DataType::Float16>();
}

template <typename WorkloadType,
          typename DescriptorType,
          typename LayerType,
          armnn::DataType DataType>
static void ClCreateArithmeticWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateArithmeticWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of CreateArithmeticWorkloadTest).
    DescriptorType queueDescriptor = workload->GetData();
    auto inputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto inputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
    ClCreateArithmeticWorkloadTest<ClAdditionWorkload,
                                   AdditionQueueDescriptor,
                                   AdditionLayer,
                                   armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
{
    ClCreateArithmeticWorkloadTest<ClAdditionWorkload,
                                   AdditionQueueDescriptor,
                                   AdditionLayer,
                                   armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
{
    ClCreateArithmeticWorkloadTest<ClSubtractionWorkload,
                                   SubtractionQueueDescriptor,
                                   SubtractionLayer,
                                   armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
{
    ClCreateArithmeticWorkloadTest<ClSubtractionWorkload,
                                   SubtractionQueueDescriptor,
                                   SubtractionLayer,
                                   armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkloadTest)
{
    ClCreateArithmeticWorkloadTest<ClMultiplicationWorkload,
                                   MultiplicationQueueDescriptor,
                                   MultiplicationLayer,
                                   armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16WorkloadTest)
{
    ClCreateArithmeticWorkloadTest<ClMultiplicationWorkload,
                                   MultiplicationQueueDescriptor,
                                   MultiplicationLayer,
                                   armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8WorkloadTest)
{
    ClCreateArithmeticWorkloadTest<ClMultiplicationWorkload,
                                   MultiplicationQueueDescriptor,
                                   MultiplicationLayer,
                                   armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
{
    ClCreateArithmeticWorkloadTest<ClDivisionFloatWorkload,
                                   DivisionQueueDescriptor,
                                   DivisionLayer,
                                   armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloat16WorkloadTest)
{
    ClCreateArithmeticWorkloadTest<ClDivisionFloatWorkload,
                                   DivisionQueueDescriptor,
                                   DivisionLayer,
                                   armnn::DataType::Float16>();
}

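// Shared helper for the batch normalization tests: creates the workload for the requested data layout and
// checks that the input/output handles have the expected NCHW ({2, 3, 4, 4}) or NHWC ({2, 4, 4, 3}) shapes.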
template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
static void ClCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>
                    (factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
    BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4, 4, 3 }));
            break;
        default: // NCHW
            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 4, 4 }));
    }
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNchwWorkload)
{
    ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
                                           armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NchwWorkload)
{
    ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
                                           armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNhwcWorkload)
{
    ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
                                           armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NhwcWorkload)
{
    ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
                                           armnn::DataType::Float16>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateConvertFp16ToFp32WorkloadTest<ClConvertFp16ToFp32Workload>(factory, graph);

    ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3}));
    BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
    BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateConvertFp32ToFp16WorkloadTest<ClConvertFp32ToFp16Workload>(factory, graph);

    ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3}));
    BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
    BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
}

template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
static void ClConvolution2dWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateConvolution2dWorkloadTest<ClConvolution2dWorkload, DataType>(factory,
                                                                                       graph,
                                                                                       dataLayout);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 3, 8, 16}) : std::initializer_list<unsigned int>({2, 8, 16, 3});
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 2, 2, 10}) : std::initializer_list<unsigned int>({2, 2, 10, 2});

    // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

template <typename DepthwiseConvolutionWorkloadType, typename armnn::DataType DataType>
static void ClDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateDepthwiseConvolution2dWorkloadTest<DepthwiseConvolutionWorkloadType, DataType>
                    (factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest).
    DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW)
            ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
            : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW)
            ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
            : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
{
    ClDepthwiseConvolutionWorkloadTest<ClDepthwiseConvolutionWorkload, DataType::Float32>(DataLayout::NHWC);
}

template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
static void ClDirectConvolution2dWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateDirectConvolution2dWorkloadTest).
    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6}));
}

BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload)
{
    ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload)
{
    ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload)
{
    ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QuantisedAsymm8>();
}

template <typename FullyConnectedWorkloadType, typename armnn::DataType DataType>
static void ClCreateFullyConnectedWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload =
        CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
    FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7}));
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkloadTest)
{
    ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16WorkloadTest)
{
    ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float16>();
}

template <typename NormalizationWorkloadType, typename armnn::DataType DataType>
static void ClNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
    NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({3, 5, 5, 1}) : std::initializer_list<unsigned int>({3, 1, 5, 5});
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({3, 5, 5, 1}) : std::initializer_list<unsigned int>({3, 1, 5, 5});

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NchwWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NhwcWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

template <typename armnn::DataType DataType>
static void ClPooling2dWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreatePooling2dWorkloadTest<ClPooling2dWorkload, DataType>(factory, graph, dataLayout);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({3, 2, 5, 5}) : std::initializer_list<unsigned int>({3, 5, 5, 2});
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({3, 2, 2, 4}) : std::initializer_list<unsigned int>({3, 2, 4, 2});

    // Check that inputs/outputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
    Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload)
{
    ClPooling2dWorkloadTest<armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
{
    ClPooling2dWorkloadTest<armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NchwWorkload)
{
    ClPooling2dWorkloadTest<armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NhwcWorkload)
{
    ClPooling2dWorkloadTest<armnn::DataType::Float16>(DataLayout::NHWC);
}

template <typename armnn::DataType DataType>
static void ClCreateReshapeWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateReshapeWorkloadTest<ClReshapeWorkload, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
    ReshapeQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4})); // Leading size 1 dimensions are collapsed by ACL.
}

BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
{
    ClCreateReshapeWorkloadTest<armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
{
    ClCreateReshapeWorkloadTest<armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
    ClCreateReshapeWorkloadTest<armnn::DataType::QuantisedAsymm8>();
}

template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
static void ClSoftmaxWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of ClSoftmaxFloatWorkload).
    SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1}));
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkloadTest)
{
    ClSoftmaxWorkloadTest<ClSoftmaxFloatWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16WorkloadTest)
{
    ClSoftmaxWorkloadTest<ClSoftmaxFloatWorkload, armnn::DataType::Float16>();
}

template <typename armnn::DataType DataType>
static void ClSplitterWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateSplitterWorkloadTest<ClSplitterWorkload, DataType>(factory, graph);

    // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {5, 7, 7}));

    auto outputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {2, 7, 7}));

    auto outputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]);
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2, 7, 7}));

    auto outputHandle0 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    // NOTE: At the moment the CL library collapses the tensor to 2 dimensions when dimension zero = 1;
    // we are raising this difference between the NEON and CL libs as an issue with the Compute Library team.
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {7, 7}));
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload)
{
    ClSplitterWorkloadTest<armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
{
    ClSplitterWorkloadTest<armnn::DataType::Float16>();
}

template <typename armnn::DataType DataType>
static void ClSplitterMergerTest()
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the merger layer.
    // We test that it is possible to specify the 0th output of the splitter to be the 1st input to the merger
    // and the 1st output of the splitter to be the 0th input of the merger.

    Graph graph;
    ClWorkloadFactory factory;

    auto workloads =
        CreateSplitterMergerWorkloadTest<ClSplitterWorkload, ClMergerWorkload, DataType>
            (factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlMerger = std::move(workloads.second);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::ClSubTensorHandle* mIn0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* mIn1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[1]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

    // Flipped order of inputs/outputs.
    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
    BOOST_TEST(validDataPointers);

    // Also make sure that the inputs are sub-tensors of one tensor and the outputs are sub-tensors of another.
    bool validSubTensorParents = (mIn0->GetTensor().parent() == mIn1->GetTensor().parent())
                                 && (sOut0->GetTensor().parent() == sOut1->GetTensor().parent());

    BOOST_TEST(validSubTensorParents);
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloatWorkload)
{
    ClSplitterMergerTest<armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat16Workload)
{
    ClSplitterMergerTest<armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
{
    // Test that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We create a splitter with two outputs and check that each of those outputs is used by two different
    // activation layers.

    Graph graph;
    ClWorkloadFactory factory;
    std::unique_ptr<ClSplitterWorkload> wlSplitter;
    std::unique_ptr<ClActivationWorkload> wlActiv0_0;
    std::unique_ptr<ClActivationWorkload> wlActiv0_1;
    std::unique_ptr<ClActivationWorkload> wlActiv1_0;
    std::unique_ptr<ClActivationWorkload> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<ClSplitterWorkload,
        ClActivationWorkload, armnn::DataType::Float32>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1,
                                                        wlActiv1_0, wlActiv1_1);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::ClSubTensorHandle* activ0_0Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* activ0_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* activ1_0Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* activ1_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}

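// Exercises the shared CreateMemCopyWorkloads helper (from CreateWorkloadClNeon.hpp) with the CL factory;
// this is expected to cover creating MemCopy workloads between CPU and CL tensor handles, which is why
// RefWorkloadFactory.hpp is included above.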
BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl)
{
    ClWorkloadFactory factory;
    CreateMemCopyWorkloads<IClTensorHandle>(factory);
}

template <typename L2NormalizationWorkloadType, typename armnn::DataType DataType>
static void ClL2NormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload =
        CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
    L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW)
            ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
            : std::initializer_list<unsigned int>({ 5, 50, 67, 20 });
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW)
            ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
            : std::initializer_list<unsigned int>({ 5, 50, 67, 20 });

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNchwWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNhwcWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

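// LSTM workload creation test: only the shapes of input 0 and output 1 of the queue descriptor are checked
// here ({2, 2} in, {2, 4} out); the remaining LSTM inputs/outputs are configured by CreateLstmWorkloadTest.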
template <typename LstmWorkloadType>
static void ClCreateLstmWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateLstmWorkloadTest<LstmWorkloadType>(factory, graph);

    LstmQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 2 }));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 }));
}

BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
{
    ClCreateLstmWorkloadTest<ClLstmFloatWorkload>();
}

template <typename ResizeBilinearWorkloadType, typename armnn::DataType DataType>
static void ClResizeBilinearWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
    ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
            break;
        default: // NCHW
            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
    }
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NchwWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NchwWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NhwcWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NhwcWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_SUITE_END()