//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClContextControlFixture.hpp"

#include <backends/MemCopyWorkload.hpp>

#include <backends/aclCommon/test/CreateWorkloadClNeon.hpp>

#include <backends/cl/ClTensorHandle.hpp>
#include <backends/cl/ClWorkloadFactory.hpp>
#include <backends/cl/workloads/ClWorkloads.hpp>
#include <backends/cl/workloads/ClWorkloadUtils.hpp>

#include <backends/reference/RefWorkloadFactory.hpp>

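// Wraps CompareTensorHandleShape so the tests below can compare an IClTensorHandle's
// actual shape against the expected dimensions.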
boost::test_tools::predicate_result CompareIClTensorHandleShape(IClTensorHandle* tensorHandle,
                                                                std::initializer_list<unsigned int> expectedDimensions)
{
    return CompareTensorHandleShape<IClTensorHandle>(tensorHandle, expectedDimensions);
}

BOOST_FIXTURE_TEST_SUITE(CreateWorkloadCl, ClContextControlFixture)

template <armnn::DataType DataType>
static void ClCreateActivationWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateActivationWorkloadTest<ClActivationWorkload, DataType>(factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest).
    ActivationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1}));
}

BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
{
    ClCreateActivationWorkloadTest<armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
{
    ClCreateActivationWorkloadTest<armnn::DataType::Float16>();
}

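// Generic helper for the element-wise arithmetic workloads (Addition, Subtraction,
// Multiplication and Division), parameterised on the workload, queue descriptor and layer types.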
template <typename WorkloadType,
          typename DescriptorType,
          typename LayerType,
          armnn::DataType DataType>
static void ClCreateArithmeticWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateArithmeticWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of CreateArithmeticWorkloadTest).
    DescriptorType queueDescriptor = workload->GetData();
    auto inputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto inputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
    ClCreateArithmeticWorkloadTest<ClAdditionWorkload,
                                   AdditionQueueDescriptor,
                                   AdditionLayer,
                                   armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
{
    ClCreateArithmeticWorkloadTest<ClAdditionWorkload,
                                   AdditionQueueDescriptor,
                                   AdditionLayer,
                                   armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
{
    ClCreateArithmeticWorkloadTest<ClSubtractionWorkload,
                                   SubtractionQueueDescriptor,
                                   SubtractionLayer,
                                   armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
{
    ClCreateArithmeticWorkloadTest<ClSubtractionWorkload,
                                   SubtractionQueueDescriptor,
                                   SubtractionLayer,
                                   armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkloadTest)
{
    ClCreateArithmeticWorkloadTest<ClMultiplicationWorkload,
                                   MultiplicationQueueDescriptor,
                                   MultiplicationLayer,
                                   armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16WorkloadTest)
{
    ClCreateArithmeticWorkloadTest<ClMultiplicationWorkload,
                                   MultiplicationQueueDescriptor,
                                   MultiplicationLayer,
                                   armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8WorkloadTest)
{
    ClCreateArithmeticWorkloadTest<ClMultiplicationWorkload,
                                   MultiplicationQueueDescriptor,
                                   MultiplicationLayer,
                                   armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
{
    ClCreateArithmeticWorkloadTest<ClDivisionFloatWorkload,
                                   DivisionQueueDescriptor,
                                   DivisionLayer,
                                   armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloat16WorkloadTest)
{
    ClCreateArithmeticWorkloadTest<ClDivisionFloatWorkload,
                                   DivisionQueueDescriptor,
                                   DivisionLayer,
                                   armnn::DataType::Float16>();
}

template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
static void ClCreateBatchNormalizationWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>
                    (factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
    BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 1, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3, 1, 1}));
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatWorkload)
{
    ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
{
    ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateConvertFp16ToFp32WorkloadTest<ClConvertFp16ToFp32Workload>(factory, graph);

    ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3}));
    BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
    BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateConvertFp32ToFp16WorkloadTest<ClConvertFp32ToFp16Workload>(factory, graph);

    ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 3}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 3}));
    BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
    BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
}

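// Helper that creates a Convolution2d workload for the given data layout; the expected
// input/output shapes below are the NCHW and NHWC views of the same tensors.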
template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
static void ClConvolution2dWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateConvolution2dWorkloadTest<ClConvolution2dWorkload, DataType>(factory,
                                                                                       graph,
                                                                                       dataLayout);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 3, 8, 16}) : std::initializer_list<unsigned int>({2, 8, 16, 3});
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({2, 2, 2, 10}) : std::initializer_list<unsigned int>({2, 2, 10, 2});

    // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload)
{
    ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

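// Depthwise convolution helper; in this configuration the output tensor has the same shape
// as the input tensor for both NCHW and NHWC layouts.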
template <typename DepthwiseConvolutionWorkloadType, typename armnn::DataType DataType>
static void ClDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateDepthwiseConvolution2dWorkloadTest<DepthwiseConvolutionWorkloadType, DataType>
                    (factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest).
    DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW)
            ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
            : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW)
            ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
            : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
{
    ClDepthwiseConvolutionWorkloadTest<ClDepthwiseConvolutionWorkload, DataType::Float32>(DataLayout::NHWC);
}

template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
static void ClDirectConvolution2dWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateDirectConvolution2dWorkloadTest).
    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6}));
}

BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload)
{
    ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload)
{
    ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload)
{
    ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QuantisedAsymm8>();
}

template <typename FullyConnectedWorkloadType, typename armnn::DataType DataType>
static void ClCreateFullyConnectedWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload =
        CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
    FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7}));
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkloadTest)
{
    ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16WorkloadTest)
{
    ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float16>();
}

template <typename NormalizationWorkloadType, typename armnn::DataType DataType>
static void ClNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>
                    (factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
    NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 5, 5, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 5, 5, 1}));
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NchwWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NhwcWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload)
{
    ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

template <typename armnn::DataType DataType>
static void ClPooling2dWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreatePooling2dWorkloadTest<ClPooling2dWorkload, DataType>(factory, graph, dataLayout);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({3, 2, 5, 5}) : std::initializer_list<unsigned int>({3, 5, 5, 2});
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
        std::initializer_list<unsigned int>({3, 2, 2, 4}) : std::initializer_list<unsigned int>({3, 2, 4, 2});

    // Check that inputs/outputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
    Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload)
{
    ClPooling2dWorkloadTest<armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
{
    ClPooling2dWorkloadTest<armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NchwWorkload)
{
    ClPooling2dWorkloadTest<armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NhwcWorkload)
{
    ClPooling2dWorkloadTest<armnn::DataType::Float16>(DataLayout::NHWC);
}

template <typename armnn::DataType DataType>
static void ClCreateReshapeWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateReshapeWorkloadTest<ClReshapeWorkload, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
    ReshapeQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4})); // Leading size 1 dimensions are collapsed by ACL.
}

BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
{
    ClCreateReshapeWorkloadTest<armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
{
    ClCreateReshapeWorkloadTest<armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
    ClCreateReshapeWorkloadTest<armnn::DataType::QuantisedAsymm8>();
}

template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
static void ClSoftmaxWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of ClSoftmaxFloatWorkload).
    SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1}));
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkloadTest)
{
    ClSoftmaxWorkloadTest<ClSoftmaxFloatWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16WorkloadTest)
{
    ClSoftmaxWorkloadTest<ClSoftmaxFloatWorkload, armnn::DataType::Float16>();
}

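// Creates a splitter workload with one input and three outputs and checks the shapes of the
// resulting sub-tensor handles (see CreateSplitterWorkloadTest).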
template <typename armnn::DataType DataType>
static void ClSplitterWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateSplitterWorkloadTest<ClSplitterWorkload, DataType>(factory, graph);

    // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {5, 7, 7}));

    auto outputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {2, 7, 7}));

    auto outputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]);
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2, 7, 7}));

    auto outputHandle0 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
    // NOTE: At the moment CL collapses the tensor to two dimensions when dimension zero equals one.
    // We are raising this difference between the NEON and CL libs as an issue with the Compute Library team.
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {7, 7}));
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload)
{
    ClSplitterWorkloadTest<armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
{
    ClSplitterWorkloadTest<armnn::DataType::Float16>();
}

template <typename armnn::DataType DataType>
static void ClSplitterMergerTest()
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the merger layer.
    // We test that it is possible to specify the 0th output of the splitter to be the 1st input
    // to the merger, and the 1st output of the splitter to be the 0th input of the merger.

    Graph graph;
    ClWorkloadFactory factory;

    auto workloads =
        CreateSplitterMergerWorkloadTest<ClSplitterWorkload, ClMergerWorkload, DataType>
            (factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlMerger = std::move(workloads.second);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::ClSubTensorHandle* mIn0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* mIn1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[1]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

    // Flipped order of inputs/outputs.
    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
    BOOST_TEST(validDataPointers);

    // Also make sure that the inputs are sub-tensors of one tensor and the outputs are sub-tensors of another tensor.
    bool validSubTensorParents = (mIn0->GetTensor().parent() == mIn1->GetTensor().parent())
                                 && (sOut0->GetTensor().parent() == sOut1->GetTensor().parent());

    BOOST_TEST(validSubTensorParents);
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloatWorkload)
{
    ClSplitterMergerTest<armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat16Workload)
{
    ClSplitterMergerTest<armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
{
    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We create a splitter with two outputs and check that each of those outputs is used by two different
    // activation layers.

    Graph graph;
    ClWorkloadFactory factory;
    std::unique_ptr<ClSplitterWorkload> wlSplitter;
    std::unique_ptr<ClActivationWorkload> wlActiv0_0;
    std::unique_ptr<ClActivationWorkload> wlActiv0_1;
    std::unique_ptr<ClActivationWorkload> wlActiv1_0;
    std::unique_ptr<ClActivationWorkload> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<ClSplitterWorkload,
        ClActivationWorkload, armnn::DataType::Float32>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1,
                                                        wlActiv1_0, wlActiv1_1);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::ClSubTensorHandle* activ0_0Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* activ0_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* activ1_0Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::ClSubTensorHandle* activ1_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl)
{
    ClWorkloadFactory factory;
    CreateMemCopyWorkloads<IClTensorHandle>(factory);
}

template <typename L2NormalizationWorkloadType, typename armnn::DataType DataType>
static void ClL2NormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload =
        CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
    L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW)
            ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
            : std::initializer_list<unsigned int>({ 5, 50, 67, 20 });
    std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW)
            ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
            : std::initializer_list<unsigned int>({ 5, 50, 67, 20 });

    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNchwWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNhwcWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload)
{
    ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

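// Creates an LSTM workload and checks the shapes of the first input and the second output
// tensor handles (see CreateLstmWorkloadTest).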
template <typename LstmWorkloadType>
static void ClCreateLstmWorkloadTest()
{
    Graph graph;
    ClWorkloadFactory factory;
    auto workload = CreateLstmWorkloadTest<LstmWorkloadType>(factory, graph);

    LstmQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 2 }));
    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 }));
}

BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
{
    ClCreateLstmWorkloadTest<ClLstmFloatWorkload>();
}

template <typename ResizeBilinearWorkloadType, typename armnn::DataType DataType>
static void ClResizeBilinearWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    ClWorkloadFactory factory;

    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
    ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);

    switch (dataLayout)
    {
        case DataLayout::NHWC:
            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
            break;
        default: // NCHW
            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
    }
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NchwWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NchwWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NhwcWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NhwcWorkload)
{
    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_SUITE_END()