blob: 19b520d1e9ef9ac7e97c71eb8421c58cac34a726 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
arovir0143095f32018-10-09 18:04:24 +01005
Aron Virginas-Tar56055192018-11-12 18:10:43 +00006#include "NeonWorkloadFactoryHelper.hpp"
7
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00008#include <backendsCommon/MemCopyWorkload.hpp>
Aron Virginas-Tar3b278e92018-10-12 13:00:55 +01009
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000010#include <aclCommon/test/CreateWorkloadClNeon.hpp>
Aron Virginas-Tar3b278e92018-10-12 13:00:55 +010011
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <neon/NeonWorkloadFactory.hpp>
13#include <neon/NeonTensorHandle.hpp>
14#include <neon/workloads/NeonWorkloadUtils.hpp>
15#include <neon/workloads/NeonWorkloads.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
17BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon)
18
19namespace
20{
21
Derek Lambertic81855f2019-06-13 17:34:19 +010022bool TestNeonTensorHandleInfo(armnn::IAclTensorHandle* handle, const armnn::TensorInfo& expectedInfo)
telsoa014fcda012018-03-09 14:13:49 +000023{
24 using namespace armnn::armcomputetensorutils;
25
26 const arm_compute::ITensorInfo* handleInfo = handle->GetTensor().info();
27 const arm_compute::TensorInfo expectedAclInfo = BuildArmComputeTensorInfo(expectedInfo);
28
29 if (handleInfo->data_type() != expectedAclInfo.data_type())
30 {
31 return false;
32 }
33
34 if (handleInfo->num_dimensions() != expectedAclInfo.num_dimensions())
35 {
36 return false;
37 }
38
39 if (handleInfo->quantization_info() != expectedAclInfo.quantization_info())
40 {
41 return false;
42 }
43
44 for (std::size_t d = 0; d < expectedAclInfo.num_dimensions(); ++d)
45 {
46 if (handleInfo->dimension(d) != expectedAclInfo.dimension(d))
47 {
48 return false;
49 }
50 }
51
52 return true;
53}
54
55} // namespace
56
Nattapat Chaimanowongd4b70592018-10-12 11:21:49 +010057template <typename armnn::DataType DataType>
telsoa01c577f2c2018-08-31 09:22:23 +010058static void NeonCreateActivationWorkloadTest()
telsoa014fcda012018-03-09 14:13:49 +000059{
60 Graph graph;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000061 NeonWorkloadFactory factory =
62 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
63
Nattapat Chaimanowongd4b70592018-10-12 11:21:49 +010064 auto workload = CreateActivationWorkloadTest<NeonActivationWorkload, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +000065
telsoa01c577f2c2018-08-31 09:22:23 +010066 // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +000067 ActivationQueueDescriptor queueDescriptor = workload->GetData();
Derek Lambertic81855f2019-06-13 17:34:19 +010068 auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
69 auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
telsoa01c577f2c2018-08-31 09:22:23 +010070 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType)));
71 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType)));
telsoa014fcda012018-03-09 14:13:49 +000072}
73
telsoa01c577f2c2018-08-31 09:22:23 +010074#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
75BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
76{
Nattapat Chaimanowongd4b70592018-10-12 11:21:49 +010077 NeonCreateActivationWorkloadTest<DataType::Float16>();
telsoa01c577f2c2018-08-31 09:22:23 +010078}
79#endif
80
arovir019e53a352018-08-31 15:26:35 +010081BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +010082{
Nattapat Chaimanowongd4b70592018-10-12 11:21:49 +010083 NeonCreateActivationWorkloadTest<DataType::Float32>();
telsoa01c577f2c2018-08-31 09:22:23 +010084}
85
David Beckbc392452018-09-10 14:47:28 +010086template <typename WorkloadType,
87 typename DescriptorType,
88 typename LayerType,
89 armnn::DataType DataType>
Éanna Ó Catháind57415d2018-11-28 16:24:38 +000090static void NeonCreateElementwiseWorkloadTest()
telsoa014fcda012018-03-09 14:13:49 +000091{
David Beckbc392452018-09-10 14:47:28 +010092 Graph graph;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000093 NeonWorkloadFactory factory =
94 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
95
Éanna Ó Catháind57415d2018-11-28 16:24:38 +000096 auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +000097
David Beckbc392452018-09-10 14:47:28 +010098 DescriptorType queueDescriptor = workload->GetData();
Derek Lambertic81855f2019-06-13 17:34:19 +010099 auto inputHandle1 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
100 auto inputHandle2 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
101 auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
telsoa01c577f2c2018-08-31 09:22:23 +0100102 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType)));
103 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType)));
104 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000105}
106
telsoa01c577f2c2018-08-31 09:22:23 +0100107#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
108BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
109{
Matthew Bentham955258d2018-12-10 10:48:52 +0000110 NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
David Beckbc392452018-09-10 14:47:28 +0100111 AdditionQueueDescriptor,
112 AdditionLayer,
113 DataType::Float16>();
telsoa01c577f2c2018-08-31 09:22:23 +0100114}
115#endif
116
arovir019e53a352018-08-31 15:26:35 +0100117BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +0100118{
Matthew Bentham955258d2018-12-10 10:48:52 +0000119 NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
David Beckbc392452018-09-10 14:47:28 +0100120 AdditionQueueDescriptor,
121 AdditionLayer,
122 DataType::Float32>();
123}
124
125#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
126BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
127{
Conor Kennedyb99480b2019-03-08 08:24:41 +0000128 NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
David Beckbc392452018-09-10 14:47:28 +0100129 SubtractionQueueDescriptor,
130 SubtractionLayer,
131 DataType::Float16>();
132}
133#endif
134
135BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
136{
Conor Kennedyb99480b2019-03-08 08:24:41 +0000137 NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
David Beckbc392452018-09-10 14:47:28 +0100138 SubtractionQueueDescriptor,
139 SubtractionLayer,
140 DataType::Float32>();
141}
142
Conor Kennedyb99480b2019-03-08 08:24:41 +0000143BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
144{
145 NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
146 SubtractionQueueDescriptor,
147 SubtractionLayer,
148 DataType::QuantisedAsymm8>();
149}
150
David Beckbc392452018-09-10 14:47:28 +0100151#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
152BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16Workload)
153{
Conor Kennedyb99480b2019-03-08 08:24:41 +0000154 NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
David Beckbc392452018-09-10 14:47:28 +0100155 MultiplicationQueueDescriptor,
156 MultiplicationLayer,
157 DataType::Float16>();
158}
159#endif
160
161BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
162{
Conor Kennedyb99480b2019-03-08 08:24:41 +0000163 NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
David Beckbc392452018-09-10 14:47:28 +0100164 MultiplicationQueueDescriptor,
165 MultiplicationLayer,
166 DataType::Float32>();
telsoa01c577f2c2018-08-31 09:22:23 +0100167}
168
Conor Kennedyb99480b2019-03-08 08:24:41 +0000169BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
170{
171 NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
172 MultiplicationQueueDescriptor,
173 MultiplicationLayer,
174 DataType::QuantisedAsymm8>();
175}
176
telsoa01c577f2c2018-08-31 09:22:23 +0100177template <typename BatchNormalizationWorkloadType, typename armnn::DataType DataType>
Nikhil Rajd1340932018-10-18 14:27:50 +0100178static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +0000179{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000180 Graph graph;
181 NeonWorkloadFactory factory =
182 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
183
Nikhil Rajd1340932018-10-18 14:27:50 +0100184 auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>
185 (factory, graph, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +0000186
telsoa01c577f2c2018-08-31 09:22:23 +0100187 // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000188 BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
Derek Lambertic81855f2019-06-13 17:34:19 +0100189 auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
190 auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
Nikhil Rajd1340932018-10-18 14:27:50 +0100191
192 TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
193 TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
194
195 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
196 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000197}
198
telsoa01c577f2c2018-08-31 09:22:23 +0100199#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Nikhil Rajd1340932018-10-18 14:27:50 +0100200BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NchwWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +0100201{
Matthew Benthamc48ac8c2018-12-12 16:15:59 +0000202 NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float16>(DataLayout::NCHW);
Nikhil Rajd1340932018-10-18 14:27:50 +0100203}
204
205BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NhwcWorkload)
206{
Matthew Benthamc48ac8c2018-12-12 16:15:59 +0000207 NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float16>(DataLayout::NHWC);
telsoa01c577f2c2018-08-31 09:22:23 +0100208}
209#endif
210
Nikhil Rajd1340932018-10-18 14:27:50 +0100211BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNchwWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +0100212{
Matthew Benthamc48ac8c2018-12-12 16:15:59 +0000213 NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float32>(DataLayout::NCHW);
Nikhil Rajd1340932018-10-18 14:27:50 +0100214}
215
216BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNhwcWorkload)
217{
Matthew Benthamc48ac8c2018-12-12 16:15:59 +0000218 NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float32>(DataLayout::NHWC);
telsoa01c577f2c2018-08-31 09:22:23 +0100219}
220
Nattapat Chaimanowong974b65f2018-10-15 15:07:34 +0100221template <typename armnn::DataType DataType>
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100222static void NeonCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
telsoa014fcda012018-03-09 14:13:49 +0000223{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000224 Graph graph;
225 NeonWorkloadFactory factory =
226 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
227
228 auto workload = CreateConvolution2dWorkloadTest<NeonConvolution2dWorkload, DataType>(factory, graph, dataLayout);
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100229
230 TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
231 TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
telsoa014fcda012018-03-09 14:13:49 +0000232
telsoa01c577f2c2018-08-31 09:22:23 +0100233 // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000234 Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
Derek Lambertic81855f2019-06-13 17:34:19 +0100235 auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
236 auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100237 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
238 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000239}
240
telsoa01c577f2c2018-08-31 09:22:23 +0100241#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100242BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +0100243{
Nattapat Chaimanowong974b65f2018-10-15 15:07:34 +0100244 NeonCreateConvolution2dWorkloadTest<DataType::Float16>();
telsoa01c577f2c2018-08-31 09:22:23 +0100245}
telsoa01c577f2c2018-08-31 09:22:23 +0100246
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100247BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload)
248{
Nattapat Chaimanowong974b65f2018-10-15 15:07:34 +0100249 NeonCreateConvolution2dWorkloadTest<DataType::Float16>(DataLayout::NHWC);
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100250}
251
252#endif
253BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +0100254{
Nattapat Chaimanowong974b65f2018-10-15 15:07:34 +0100255 NeonCreateConvolution2dWorkloadTest<DataType::Float32>();
telsoa01c577f2c2018-08-31 09:22:23 +0100256}
257
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100258BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
259{
Nattapat Chaimanowong974b65f2018-10-15 15:07:34 +0100260 NeonCreateConvolution2dWorkloadTest<DataType::Float32>(DataLayout::NHWC);
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100261}
262
Nattapat Chaimanowong77140882018-10-17 11:12:19 +0100263template <typename armnn::DataType DataType>
Nikhil Rajcec6b652018-10-12 13:51:57 +0100264static void NeonCreateDepthWiseConvolutionWorkloadTest(DataLayout dataLayout)
265{
266 Graph graph;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000267 NeonWorkloadFactory factory =
268 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
Nikhil Rajcec6b652018-10-12 13:51:57 +0100269
Nattapat Chaimanowong77140882018-10-17 11:12:19 +0100270 auto workload = CreateDepthwiseConvolution2dWorkloadTest<NeonDepthwiseConvolutionWorkload,
Nikhil Rajcec6b652018-10-12 13:51:57 +0100271 DataType>(factory, graph, dataLayout);
272
273 // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
274 DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
Derek Lambertic81855f2019-06-13 17:34:19 +0100275 auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
276 auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100277
Mike Kellydb482882019-06-14 12:35:24 +0100278 TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
279 : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
280 TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
281 : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
Nikhil Rajcec6b652018-10-12 13:51:57 +0100282
283 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
284 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
285}
286
287BOOST_AUTO_TEST_CASE(CreateDepthWiseConvolution2dFloat32NhwcWorkload)
288{
Nattapat Chaimanowong77140882018-10-17 11:12:19 +0100289 NeonCreateDepthWiseConvolutionWorkloadTest<DataType::Float32>(DataLayout::NHWC);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100290}
291
292#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
293BOOST_AUTO_TEST_CASE(CreateDepthWiseConvolution2dFloat16NhwcWorkload)
294{
Nattapat Chaimanowong77140882018-10-17 11:12:19 +0100295 NeonCreateDepthWiseConvolutionWorkloadTest<DataType::Float16>(DataLayout::NHWC);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100296}
297#endif
298
telsoa01c577f2c2018-08-31 09:22:23 +0100299template <typename FullyConnectedWorkloadType, typename armnn::DataType DataType>
300static void NeonCreateFullyConnectedWorkloadTest()
telsoa014fcda012018-03-09 14:13:49 +0000301{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000302 Graph graph;
303 NeonWorkloadFactory factory =
304 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
305
306 auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000307
telsoa01c577f2c2018-08-31 09:22:23 +0100308 // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000309 FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
Derek Lambertic81855f2019-06-13 17:34:19 +0100310 auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
311 auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
telsoa01c577f2c2018-08-31 09:22:23 +0100312 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType)));
313 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000314}
315
telsoa01c577f2c2018-08-31 09:22:23 +0100316#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
317BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16Workload)
318{
kevmay01e448be32018-09-26 10:21:55 +0100319 NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::Float16>();
telsoa01c577f2c2018-08-31 09:22:23 +0100320}
321#endif
322
arovir019e53a352018-08-31 15:26:35 +0100323BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +0100324{
kevmay01e448be32018-09-26 10:21:55 +0100325 NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::Float32>();
telsoa01c577f2c2018-08-31 09:22:23 +0100326}
327
telsoa01c577f2c2018-08-31 09:22:23 +0100328template <typename NormalizationWorkloadType, typename armnn::DataType DataType>
narpra0155a97bc2018-10-02 14:35:53 +0100329static void NeonCreateNormalizationWorkloadTest(DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +0000330{
narpra0155a97bc2018-10-02 14:35:53 +0100331 Graph graph;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000332 NeonWorkloadFactory factory =
333 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
334
narpra0155a97bc2018-10-02 14:35:53 +0100335 auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +0000336
telsoa01c577f2c2018-08-31 09:22:23 +0100337 // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000338 NormalizationQueueDescriptor queueDescriptor = workload->GetData();
Derek Lambertic81855f2019-06-13 17:34:19 +0100339 auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
340 auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
Matteo Martincigha160b242018-10-18 10:33:23 +0100341
342 TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
343 TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
344
345 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
346 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000347}
348
telsoa01c577f2c2018-08-31 09:22:23 +0100349#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
narpra0155a97bc2018-10-02 14:35:53 +0100350BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +0100351{
narpra0155a97bc2018-10-02 14:35:53 +0100352 NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
353}
354
355BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload)
356{
357 NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
telsoa01c577f2c2018-08-31 09:22:23 +0100358}
359#endif
360
narpra0155a97bc2018-10-02 14:35:53 +0100361BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNchwWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +0100362{
narpra0155a97bc2018-10-02 14:35:53 +0100363 NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
telsoa01c577f2c2018-08-31 09:22:23 +0100364}
365
narpra0155a97bc2018-10-02 14:35:53 +0100366BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNhwcWorkload)
367{
368 NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
369}
370
371
Nattapat Chaimanowong5d2e7002018-10-12 16:03:56 +0100372template <typename armnn::DataType DataType>
Nina Drozdb48e6862018-10-09 12:09:56 +0100373static void NeonCreatePooling2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
telsoa014fcda012018-03-09 14:13:49 +0000374{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000375 Graph graph;
376 NeonWorkloadFactory factory =
377 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
378
379 auto workload = CreatePooling2dWorkloadTest<NeonPooling2dWorkload, DataType>(factory, graph, dataLayout);
Nina Drozdb48e6862018-10-09 12:09:56 +0100380
381 TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 5, 5} : TensorShape{3, 5, 5, 2};
382 TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 2, 4} : TensorShape{3, 2, 4, 2};
telsoa014fcda012018-03-09 14:13:49 +0000383
telsoa01c577f2c2018-08-31 09:22:23 +0100384 // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000385 Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
Derek Lambertic81855f2019-06-13 17:34:19 +0100386 auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
387 auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
Nina Drozdb48e6862018-10-09 12:09:56 +0100388 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
389 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000390}
391
telsoa01c577f2c2018-08-31 09:22:23 +0100392#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
393BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16Workload)
394{
Nattapat Chaimanowong5d2e7002018-10-12 16:03:56 +0100395 NeonCreatePooling2dWorkloadTest<DataType::Float16>();
telsoa01c577f2c2018-08-31 09:22:23 +0100396}
397#endif
398
Nina Drozdb48e6862018-10-09 12:09:56 +0100399BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +0100400{
Nattapat Chaimanowong5d2e7002018-10-12 16:03:56 +0100401 NeonCreatePooling2dWorkloadTest<DataType::Float32>(DataLayout::NCHW);
telsoa01c577f2c2018-08-31 09:22:23 +0100402}
403
Nina Drozdb48e6862018-10-09 12:09:56 +0100404BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +0100405{
Nattapat Chaimanowong5d2e7002018-10-12 16:03:56 +0100406 NeonCreatePooling2dWorkloadTest<DataType::Float32>(DataLayout::NHWC);
Nina Drozdb48e6862018-10-09 12:09:56 +0100407}
408
409BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NchwWorkload)
410{
Nattapat Chaimanowong5d2e7002018-10-12 16:03:56 +0100411 NeonCreatePooling2dWorkloadTest<DataType::QuantisedAsymm8>(DataLayout::NCHW);
Nina Drozdb48e6862018-10-09 12:09:56 +0100412}
413
414BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
415{
Nattapat Chaimanowong5d2e7002018-10-12 16:03:56 +0100416 NeonCreatePooling2dWorkloadTest<DataType::QuantisedAsymm8>(DataLayout::NHWC);
telsoa01c577f2c2018-08-31 09:22:23 +0100417}
418
Nikhil Raj9b461482019-07-03 15:58:31 +0100419static void NeonCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
420 const armnn::TensorShape& alphaShape,
421 const armnn::TensorShape& outputShape,
422 armnn::DataType dataType)
423{
424 Graph graph;
425 NeonWorkloadFactory factory =
426 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
427
428 auto workload = CreatePreluWorkloadTest<NeonPreluWorkload>(factory,
429 graph,
430 inputShape,
431 alphaShape,
432 outputShape,
433 dataType);
434
435 // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
436 PreluQueueDescriptor queueDescriptor = workload->GetData();
437 auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
438 auto alphaHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
439 auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
440 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, dataType)));
441 BOOST_TEST(TestNeonTensorHandleInfo(alphaHandle, TensorInfo(alphaShape, dataType)));
442 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, dataType)));
443}
444
445#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
446 BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
447{
448 NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float16);
449}
450#endif
451
452BOOST_AUTO_TEST_CASE(CreatePreluFloatWorkload)
453{
454 NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float32);
455}
456
457BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
458{
459 NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QuantisedAsymm8);
460}
461
Nattapat Chaimanowongcce11fc2018-10-12 16:30:56 +0100462template <typename armnn::DataType DataType>
telsoa01c577f2c2018-08-31 09:22:23 +0100463static void NeonCreateReshapeWorkloadTest()
telsoa014fcda012018-03-09 14:13:49 +0000464{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000465 Graph graph;
466 NeonWorkloadFactory factory =
467 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
468
469 auto workload = CreateReshapeWorkloadTest<NeonReshapeWorkload, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000470
telsoa01c577f2c2018-08-31 09:22:23 +0100471 // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000472 ReshapeQueueDescriptor queueDescriptor = workload->GetData();
Derek Lambertic81855f2019-06-13 17:34:19 +0100473 auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
474 auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
telsoa01c577f2c2018-08-31 09:22:23 +0100475 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
476 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000477}
478
telsoa01c577f2c2018-08-31 09:22:23 +0100479#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
480BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
481{
Nattapat Chaimanowongcce11fc2018-10-12 16:30:56 +0100482 NeonCreateReshapeWorkloadTest<DataType::Float16>();
telsoa01c577f2c2018-08-31 09:22:23 +0100483}
484#endif
485
arovir019e53a352018-08-31 15:26:35 +0100486BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
telsoa014fcda012018-03-09 14:13:49 +0000487{
Nattapat Chaimanowongcce11fc2018-10-12 16:30:56 +0100488 NeonCreateReshapeWorkloadTest<DataType::Float32>();
telsoa014fcda012018-03-09 14:13:49 +0000489}
490
491BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
492{
Nattapat Chaimanowongcce11fc2018-10-12 16:30:56 +0100493 NeonCreateReshapeWorkloadTest<DataType::QuantisedAsymm8>();
telsoa014fcda012018-03-09 14:13:49 +0000494}
495
telsoa01c577f2c2018-08-31 09:22:23 +0100496template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
497static void NeonCreateSoftmaxWorkloadTest()
telsoa014fcda012018-03-09 14:13:49 +0000498{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000499 Graph graph;
500 NeonWorkloadFactory factory =
501 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
502
telsoa01c577f2c2018-08-31 09:22:23 +0100503 auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000504
telsoa01c577f2c2018-08-31 09:22:23 +0100505 // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000506 SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
Derek Lambertic81855f2019-06-13 17:34:19 +0100507 auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
508 auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
telsoa01c577f2c2018-08-31 09:22:23 +0100509 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
510 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({4, 1}, DataType)));
511}
512
513#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
514BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
515{
arovir019e53a352018-08-31 15:26:35 +0100516 NeonCreateSoftmaxWorkloadTest<NeonSoftmaxFloatWorkload, DataType::Float16>();
telsoa01c577f2c2018-08-31 09:22:23 +0100517}
518#endif
519
arovir019e53a352018-08-31 15:26:35 +0100520BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkload)
telsoa01c577f2c2018-08-31 09:22:23 +0100521{
arovir019e53a352018-08-31 15:26:35 +0100522 NeonCreateSoftmaxWorkloadTest<NeonSoftmaxFloatWorkload, DataType::Float32>();
telsoa014fcda012018-03-09 14:13:49 +0000523}
524
525BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
526{
527 Graph graph;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000528 NeonWorkloadFactory factory =
529 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
530
Nattapat Chaimanowong14766d72018-10-12 15:09:53 +0100531 auto workload = CreateSplitterWorkloadTest<NeonSplitterWorkload, DataType::Float32>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000532
telsoa01c577f2c2018-08-31 09:22:23 +0100533 // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000534 SplitterQueueDescriptor queueDescriptor = workload->GetData();
Derek Lambertic81855f2019-06-13 17:34:19 +0100535 auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
surmeh013537c2c2018-05-18 16:31:43 +0100536 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32)));
537
Derek Lambertic81855f2019-06-13 17:34:19 +0100538 auto outputHandle0 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
surmeh013537c2c2018-05-18 16:31:43 +0100539 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32)));
540
Derek Lambertic81855f2019-06-13 17:34:19 +0100541 auto outputHandle1 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
surmeh013537c2c2018-05-18 16:31:43 +0100542 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32)));
543
Derek Lambertic81855f2019-06-13 17:34:19 +0100544 auto outputHandle2 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
surmeh013537c2c2018-05-18 16:31:43 +0100545 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
telsoa014fcda012018-03-09 14:13:49 +0000546}
547
Jim Flynne242f2d2019-05-22 14:24:13 +0100548BOOST_AUTO_TEST_CASE(CreateSplitterConcat)
telsoa014fcda012018-03-09 14:13:49 +0000549{
telsoa01c577f2c2018-08-31 09:22:23 +0100550 // Tests that it is possible to decide which output of the splitter layer
Jim Flynne242f2d2019-05-22 14:24:13 +0100551 // should be lined to which input of the concat layer.
telsoa01c577f2c2018-08-31 09:22:23 +0100552 // We tested that is is possible to specify 0th output
Jim Flynne242f2d2019-05-22 14:24:13 +0100553 // of the splitter to be the 1st input to the concat, and the 1st output of the splitter to be 0th input
554 // of the concat.
telsoa014fcda012018-03-09 14:13:49 +0000555
556 Graph graph;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000557 NeonWorkloadFactory factory =
558 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
telsoa014fcda012018-03-09 14:13:49 +0000559
560 auto workloads =
Jim Flynne242f2d2019-05-22 14:24:13 +0100561 CreateSplitterConcatWorkloadTest<NeonSplitterWorkload, NeonConcatWorkload,
telsoa01c577f2c2018-08-31 09:22:23 +0100562 DataType::Float32>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000563
564 auto wlSplitter = std::move(workloads.first);
Jim Flynne242f2d2019-05-22 14:24:13 +0100565 auto wlConcat = std::move(workloads.second);
telsoa014fcda012018-03-09 14:13:49 +0000566
telsoa01c577f2c2018-08-31 09:22:23 +0100567 //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
Derek Lambertic81855f2019-06-13 17:34:19 +0100568 armnn::IAclTensorHandle* sOut0 = dynamic_cast<armnn::IAclTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
569 armnn::IAclTensorHandle* sOut1 = dynamic_cast<armnn::IAclTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
570 armnn::IAclTensorHandle* mIn0 = dynamic_cast<armnn::IAclTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
571 armnn::IAclTensorHandle* mIn1 = dynamic_cast<armnn::IAclTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
telsoa014fcda012018-03-09 14:13:49 +0000572
573 BOOST_TEST(sOut0);
574 BOOST_TEST(sOut1);
575 BOOST_TEST(mIn0);
576 BOOST_TEST(mIn1);
577
578 bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
579
580 BOOST_TEST(validDataPointers);
581}
582
583BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
584{
telsoa01c577f2c2018-08-31 09:22:23 +0100585 // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
586 // We created a splitter with two outputs. That each of those outputs is used by two different activation layers
telsoa014fcda012018-03-09 14:13:49 +0000587
588 Graph graph;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000589 NeonWorkloadFactory factory =
590 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
591
Nattapat Chaimanowong14766d72018-10-12 15:09:53 +0100592 std::unique_ptr<NeonSplitterWorkload> wlSplitter;
Nattapat Chaimanowongd4b70592018-10-12 11:21:49 +0100593 std::unique_ptr<NeonActivationWorkload> wlActiv0_0;
594 std::unique_ptr<NeonActivationWorkload> wlActiv0_1;
595 std::unique_ptr<NeonActivationWorkload> wlActiv1_0;
596 std::unique_ptr<NeonActivationWorkload> wlActiv1_1;
telsoa014fcda012018-03-09 14:13:49 +0000597
Nattapat Chaimanowong14766d72018-10-12 15:09:53 +0100598 CreateSplitterMultipleInputsOneOutputWorkloadTest<NeonSplitterWorkload,
Nattapat Chaimanowongd4b70592018-10-12 11:21:49 +0100599 NeonActivationWorkload, DataType::Float32>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1,
600 wlActiv1_0, wlActiv1_1);
telsoa014fcda012018-03-09 14:13:49 +0000601
Derek Lambertic81855f2019-06-13 17:34:19 +0100602 armnn::IAclTensorHandle* sOut0 = dynamic_cast<armnn::IAclTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
603 armnn::IAclTensorHandle* sOut1 = dynamic_cast<armnn::IAclTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
604 armnn::IAclTensorHandle* activ0_0Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
605 armnn::IAclTensorHandle* activ0_1Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
606 armnn::IAclTensorHandle* activ1_0Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
607 armnn::IAclTensorHandle* activ1_1Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
telsoa014fcda012018-03-09 14:13:49 +0000608
609
610 BOOST_TEST(sOut0);
611 BOOST_TEST(sOut1);
612 BOOST_TEST(activ0_0Im);
613 BOOST_TEST(activ0_1Im);
614 BOOST_TEST(activ1_0Im);
615 BOOST_TEST(activ1_1Im);
616
617 bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
618 (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
619
620 BOOST_TEST(validDataPointers);
621}
622
// Checks that mem-copy workloads between CPU and Neon tensor handles can be created
// (delegates to the shared CreateMemCopyWorkloads helper).
BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsNeon)
{
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
    CreateMemCopyWorkloads<IAclTensorHandle>(factory);
}
629
Matteo Martincighbcd3c852018-09-28 14:14:12 +0100630template <typename L2NormalizationWorkloadType, typename armnn::DataType DataType>
631static void NeonCreateL2NormalizationWorkloadTest(DataLayout dataLayout)
632{
Matteo Martincigh2400b6d2018-10-09 18:19:20 +0100633 Graph graph;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000634 NeonWorkloadFactory factory =
635 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
636
Matteo Martincigh2400b6d2018-10-09 18:19:20 +0100637 auto workload =
638 CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
Matteo Martincighbcd3c852018-09-28 14:14:12 +0100639
640 // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
641 L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
Derek Lambertic81855f2019-06-13 17:34:19 +0100642 auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
643 auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
Matteo Martincigh2400b6d2018-10-09 18:19:20 +0100644
645 TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
646 TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
647 TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
648 TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
649
650 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
651 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
Matteo Martincighbcd3c852018-09-28 14:14:12 +0100652}
653
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
// Float16 variants require half-precision vector arithmetic support on the target.
BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload)
{
    NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload)
{
    NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
}
#endif
665
// Float32 L2 normalization with NCHW data layout.
BOOST_AUTO_TEST_CASE(CreateL2NormalizationNchwWorkload)
{
    NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
}
670
// Float32 L2 normalization with NHWC data layout.
BOOST_AUTO_TEST_CASE(CreateL2NormalizationNhwcWorkload)
{
    NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
}
675
Jim Flynne242f2d2019-05-22 14:24:13 +0100676template <typename ConcatWorkloadType, armnn::DataType DataType>
677static void NeonCreateConcatWorkloadTest(std::initializer_list<unsigned int> outputShape,
narpra015cdda352018-11-19 15:30:27 +0000678 unsigned int concatAxis)
679{
680 Graph graph;
681 NeonWorkloadFactory factory =
682 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
683
Jim Flynne242f2d2019-05-22 14:24:13 +0100684 auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
narpra015cdda352018-11-19 15:30:27 +0000685
Jim Flynne242f2d2019-05-22 14:24:13 +0100686 ConcatQueueDescriptor queueDescriptor = workload->GetData();
Derek Lambertic81855f2019-06-13 17:34:19 +0100687 auto inputHandle0 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
688 auto inputHandle1 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
689 auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
narpra015cdda352018-11-19 15:30:27 +0000690
691 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle0, TensorInfo({ 2, 3, 2, 5 }, DataType)));
692 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({ 2, 3, 2, 5 }, DataType)));
693 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
694}
695
// Float32 concat along axis 0: two { 2, 3, 2, 5 } inputs -> { 4, 3, 2, 5 }.
BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
{
    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}
700
// Float32 concat along axis 1: two { 2, 3, 2, 5 } inputs -> { 2, 6, 2, 5 }.
BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
{
    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}
705
// Float32 concat along axis 3: two { 2, 3, 2, 5 } inputs -> { 2, 3, 2, 10 }.
BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
{
    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}
710
// Quantised asymm8 concat along axis 0: two { 2, 3, 2, 5 } inputs -> { 4, 3, 2, 5 }.
BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
}
715
// Quantised asymm8 concat along axis 1: two { 2, 3, 2, 5 } inputs -> { 2, 6, 2, 5 }.
BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
}
720
// Quantised asymm8 concat along axis 3: two { 2, 3, 2, 5 } inputs -> { 2, 3, 2, 10 }.
BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
}
725
telsoa014fcda012018-03-09 14:13:49 +0000726BOOST_AUTO_TEST_SUITE_END()