blob: 120125311e9a1db0dc2d10b6ee622807cf5b8d66 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
arovir0143095f32018-10-09 18:04:24 +01005
Aron Virginas-Tar56055192018-11-12 18:10:43 +00006#include "NeonWorkloadFactoryHelper.hpp"
7
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00008#include <backendsCommon/MemCopyWorkload.hpp>
Aron Virginas-Tar3b278e92018-10-12 13:00:55 +01009
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000010#include <aclCommon/test/CreateWorkloadClNeon.hpp>
Aron Virginas-Tar3b278e92018-10-12 13:00:55 +010011
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <neon/NeonWorkloadFactory.hpp>
13#include <neon/NeonTensorHandle.hpp>
14#include <neon/workloads/NeonWorkloadUtils.hpp>
15#include <neon/workloads/NeonWorkloads.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
17BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon)
18
19namespace
20{
21
22bool TestNeonTensorHandleInfo(armnn::INeonTensorHandle* handle, const armnn::TensorInfo& expectedInfo)
23{
24 using namespace armnn::armcomputetensorutils;
25
26 const arm_compute::ITensorInfo* handleInfo = handle->GetTensor().info();
27 const arm_compute::TensorInfo expectedAclInfo = BuildArmComputeTensorInfo(expectedInfo);
28
29 if (handleInfo->data_type() != expectedAclInfo.data_type())
30 {
31 return false;
32 }
33
34 if (handleInfo->num_dimensions() != expectedAclInfo.num_dimensions())
35 {
36 return false;
37 }
38
39 if (handleInfo->quantization_info() != expectedAclInfo.quantization_info())
40 {
41 return false;
42 }
43
44 for (std::size_t d = 0; d < expectedAclInfo.num_dimensions(); ++d)
45 {
46 if (handleInfo->dimension(d) != expectedAclInfo.dimension(d))
47 {
48 return false;
49 }
50 }
51
52 return true;
53}
54
55} // namespace
56
Nattapat Chaimanowongd4b70592018-10-12 11:21:49 +010057template <typename armnn::DataType DataType>
telsoa01c577f2c2018-08-31 09:22:23 +010058static void NeonCreateActivationWorkloadTest()
telsoa014fcda012018-03-09 14:13:49 +000059{
60 Graph graph;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000061 NeonWorkloadFactory factory =
62 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
63
Nattapat Chaimanowongd4b70592018-10-12 11:21:49 +010064 auto workload = CreateActivationWorkloadTest<NeonActivationWorkload, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +000065
telsoa01c577f2c2018-08-31 09:22:23 +010066 // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +000067 ActivationQueueDescriptor queueDescriptor = workload->GetData();
68 auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
69 auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
telsoa01c577f2c2018-08-31 09:22:23 +010070 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType)));
71 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType)));
telsoa014fcda012018-03-09 14:13:49 +000072}
73
// Activation workload creation, per data type. FP16 is only available when the
// toolchain provides FP16 vector arithmetic.
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
{
    NeonCreateActivationWorkloadTest<DataType::Float16>();
}
#endif

BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
{
    NeonCreateActivationWorkloadTest<DataType::Float32>();
}
85
David Beckbc392452018-09-10 14:47:28 +010086template <typename WorkloadType,
87 typename DescriptorType,
88 typename LayerType,
89 armnn::DataType DataType>
90static void NeonCreateArithmethicWorkloadTest()
telsoa014fcda012018-03-09 14:13:49 +000091{
David Beckbc392452018-09-10 14:47:28 +010092 Graph graph;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000093 NeonWorkloadFactory factory =
94 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
95
David Beckbc392452018-09-10 14:47:28 +010096 auto workload = CreateArithmeticWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +000097
David Beckbc392452018-09-10 14:47:28 +010098 DescriptorType queueDescriptor = workload->GetData();
telsoa014fcda012018-03-09 14:13:49 +000099 auto inputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
100 auto inputHandle2 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[1]);
101 auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
telsoa01c577f2c2018-08-31 09:22:23 +0100102 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType)));
103 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType)));
104 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000105}
106
// Addition / subtraction / multiplication workload creation, per data type.
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
{
    NeonCreateArithmethicWorkloadTest<NeonAdditionFloatWorkload,
                                      AdditionQueueDescriptor,
                                      AdditionLayer,
                                      DataType::Float16>();
}
#endif

BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
    NeonCreateArithmethicWorkloadTest<NeonAdditionFloatWorkload,
                                      AdditionQueueDescriptor,
                                      AdditionLayer,
                                      DataType::Float32>();
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
{
    NeonCreateArithmethicWorkloadTest<NeonSubtractionFloatWorkload,
                                      SubtractionQueueDescriptor,
                                      SubtractionLayer,
                                      DataType::Float16>();
}
#endif

BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
{
    NeonCreateArithmethicWorkloadTest<NeonSubtractionFloatWorkload,
                                      SubtractionQueueDescriptor,
                                      SubtractionLayer,
                                      DataType::Float32>();
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16Workload)
{
    NeonCreateArithmethicWorkloadTest<NeonMultiplicationFloatWorkload,
                                      MultiplicationQueueDescriptor,
                                      MultiplicationLayer,
                                      DataType::Float16>();
}
#endif

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
{
    NeonCreateArithmethicWorkloadTest<NeonMultiplicationFloatWorkload,
                                      MultiplicationQueueDescriptor,
                                      MultiplicationLayer,
                                      DataType::Float32>();
}
160
161template <typename BatchNormalizationWorkloadType, typename armnn::DataType DataType>
Nikhil Rajd1340932018-10-18 14:27:50 +0100162static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +0000163{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000164 Graph graph;
165 NeonWorkloadFactory factory =
166 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
167
Nikhil Rajd1340932018-10-18 14:27:50 +0100168 auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>
169 (factory, graph, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +0000170
telsoa01c577f2c2018-08-31 09:22:23 +0100171 // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000172 BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
173 auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
174 auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
Nikhil Rajd1340932018-10-18 14:27:50 +0100175
176 TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
177 TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
178
179 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
180 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000181}
182
// Batch normalization workload creation, per data type and data layout.
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NchwWorkload)
{
    NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NhwcWorkload)
{
    NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
}
#endif

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNchwWorkload)
{
    NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNhwcWorkload)
{
    NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
}
204
Nattapat Chaimanowong974b65f2018-10-15 15:07:34 +0100205template <typename armnn::DataType DataType>
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100206static void NeonCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
telsoa014fcda012018-03-09 14:13:49 +0000207{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000208 Graph graph;
209 NeonWorkloadFactory factory =
210 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
211
212 auto workload = CreateConvolution2dWorkloadTest<NeonConvolution2dWorkload, DataType>(factory, graph, dataLayout);
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100213
214 TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
215 TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
telsoa014fcda012018-03-09 14:13:49 +0000216
telsoa01c577f2c2018-08-31 09:22:23 +0100217 // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000218 Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
219 auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
220 auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
Francis Murtagh0d9d4192018-10-09 16:22:33 +0100221 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
222 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000223}
224
// Convolution2d workload creation, per data type and data layout.
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload)
{
    NeonCreateConvolution2dWorkloadTest<DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload)
{
    NeonCreateConvolution2dWorkloadTest<DataType::Float16>(DataLayout::NHWC);
}

#endif
BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
{
    NeonCreateConvolution2dWorkloadTest<DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
{
    NeonCreateConvolution2dWorkloadTest<DataType::Float32>(DataLayout::NHWC);
}
246
Nattapat Chaimanowong77140882018-10-17 11:12:19 +0100247template <typename armnn::DataType DataType>
Nikhil Rajcec6b652018-10-12 13:51:57 +0100248static void NeonCreateDepthWiseConvolutionWorkloadTest(DataLayout dataLayout)
249{
250 Graph graph;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000251 NeonWorkloadFactory factory =
252 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
Nikhil Rajcec6b652018-10-12 13:51:57 +0100253
Nattapat Chaimanowong77140882018-10-17 11:12:19 +0100254 auto workload = CreateDepthwiseConvolution2dWorkloadTest<NeonDepthwiseConvolutionWorkload,
Nikhil Rajcec6b652018-10-12 13:51:57 +0100255 DataType>(factory, graph, dataLayout);
256
257 // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
258 DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
259 auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
260 auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
261
262 std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW)
263 ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
264 : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
265 std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW)
266 ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
267 : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
268
269 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
270 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
271}
272
// Depthwise convolution workload creation — NHWC only is exercised here.
BOOST_AUTO_TEST_CASE(CreateDepthWiseConvolution2dFloat32NhwcWorkload)
{
    NeonCreateDepthWiseConvolutionWorkloadTest<DataType::Float32>(DataLayout::NHWC);
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateDepthWiseConvolution2dFloat16NhwcWorkload)
{
    NeonCreateDepthWiseConvolutionWorkloadTest<DataType::Float16>(DataLayout::NHWC);
}
#endif
284
telsoa01c577f2c2018-08-31 09:22:23 +0100285template <typename FullyConnectedWorkloadType, typename armnn::DataType DataType>
286static void NeonCreateFullyConnectedWorkloadTest()
telsoa014fcda012018-03-09 14:13:49 +0000287{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000288 Graph graph;
289 NeonWorkloadFactory factory =
290 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
291
292 auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000293
telsoa01c577f2c2018-08-31 09:22:23 +0100294 // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000295 FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
296 auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
297 auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
telsoa01c577f2c2018-08-31 09:22:23 +0100298 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType)));
299 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000300}
301
// Fully-connected workload creation, per data type.
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16Workload)
{
    NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::Float16>();
}
#endif

BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkload)
{
    NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::Float32>();
}
313
// Drives normalization workload creation for a given data type and layout and
// verifies the input/output tensor infos against the shapes the shared helper
// is expected to use.
template <typename NormalizationWorkloadType, typename armnn::DataType DataType>
static void NeonCreateNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
    NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);

    // NOTE(review): {3, 5, 5, 1} for NCHW vs {3, 1, 5, 5} for NHWC looks inverted
    // relative to the usual channel placement — these values must mirror whatever
    // CreateNormalizationWorkloadTest builds, so confirm against that helper
    // before "fixing" them here.
    TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};

    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}
334
// Normalization workload creation, per data type and data layout.
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload)
{
    NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload)
{
    NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
}
#endif

BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNchwWorkload)
{
    NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNhwcWorkload)
{
    NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
}
356
357
Nattapat Chaimanowong5d2e7002018-10-12 16:03:56 +0100358template <typename armnn::DataType DataType>
Nina Drozdb48e6862018-10-09 12:09:56 +0100359static void NeonCreatePooling2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
telsoa014fcda012018-03-09 14:13:49 +0000360{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000361 Graph graph;
362 NeonWorkloadFactory factory =
363 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
364
365 auto workload = CreatePooling2dWorkloadTest<NeonPooling2dWorkload, DataType>(factory, graph, dataLayout);
Nina Drozdb48e6862018-10-09 12:09:56 +0100366
367 TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 5, 5} : TensorShape{3, 5, 5, 2};
368 TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 2, 4} : TensorShape{3, 2, 4, 2};
telsoa014fcda012018-03-09 14:13:49 +0000369
telsoa01c577f2c2018-08-31 09:22:23 +0100370 // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000371 Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
372 auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
373 auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
Nina Drozdb48e6862018-10-09 12:09:56 +0100374 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
375 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000376}
377
// Pooling2d workload creation, per data type and data layout.
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16Workload)
{
    NeonCreatePooling2dWorkloadTest<DataType::Float16>();
}
#endif

BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload)
{
    NeonCreatePooling2dWorkloadTest<DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
{
    NeonCreatePooling2dWorkloadTest<DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NchwWorkload)
{
    NeonCreatePooling2dWorkloadTest<DataType::QuantisedAsymm8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
{
    NeonCreatePooling2dWorkloadTest<DataType::QuantisedAsymm8>(DataLayout::NHWC);
}
404
Nattapat Chaimanowongcce11fc2018-10-12 16:30:56 +0100405template <typename armnn::DataType DataType>
telsoa01c577f2c2018-08-31 09:22:23 +0100406static void NeonCreateReshapeWorkloadTest()
telsoa014fcda012018-03-09 14:13:49 +0000407{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000408 Graph graph;
409 NeonWorkloadFactory factory =
410 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
411
412 auto workload = CreateReshapeWorkloadTest<NeonReshapeWorkload, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000413
telsoa01c577f2c2018-08-31 09:22:23 +0100414 // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000415 ReshapeQueueDescriptor queueDescriptor = workload->GetData();
416 auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
417 auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
telsoa01c577f2c2018-08-31 09:22:23 +0100418 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
419 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType)));
telsoa014fcda012018-03-09 14:13:49 +0000420}
421
// Reshape workload creation, per data type.
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
{
    NeonCreateReshapeWorkloadTest<DataType::Float16>();
}
#endif

BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
{
    NeonCreateReshapeWorkloadTest<DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
    NeonCreateReshapeWorkloadTest<DataType::QuantisedAsymm8>();
}
438
telsoa01c577f2c2018-08-31 09:22:23 +0100439template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
440static void NeonCreateSoftmaxWorkloadTest()
telsoa014fcda012018-03-09 14:13:49 +0000441{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000442 Graph graph;
443 NeonWorkloadFactory factory =
444 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
445
telsoa01c577f2c2018-08-31 09:22:23 +0100446 auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000447
telsoa01c577f2c2018-08-31 09:22:23 +0100448 // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000449 SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
450 auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
451 auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
telsoa01c577f2c2018-08-31 09:22:23 +0100452 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
453 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({4, 1}, DataType)));
454}
455
// Softmax workload creation, per data type.
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
{
    NeonCreateSoftmaxWorkloadTest<NeonSoftmaxFloatWorkload, DataType::Float16>();
}
#endif

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkload)
{
    NeonCreateSoftmaxWorkloadTest<NeonSoftmaxFloatWorkload, DataType::Float32>();
}
467
468BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
469{
470 Graph graph;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000471 NeonWorkloadFactory factory =
472 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
473
Nattapat Chaimanowong14766d72018-10-12 15:09:53 +0100474 auto workload = CreateSplitterWorkloadTest<NeonSplitterWorkload, DataType::Float32>(factory, graph);
telsoa014fcda012018-03-09 14:13:49 +0000475
telsoa01c577f2c2018-08-31 09:22:23 +0100476 // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
telsoa014fcda012018-03-09 14:13:49 +0000477 SplitterQueueDescriptor queueDescriptor = workload->GetData();
478 auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
surmeh013537c2c2018-05-18 16:31:43 +0100479 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32)));
480
telsoa014fcda012018-03-09 14:13:49 +0000481 auto outputHandle0 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
surmeh013537c2c2018-05-18 16:31:43 +0100482 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32)));
483
telsoa014fcda012018-03-09 14:13:49 +0000484 auto outputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[1]);
surmeh013537c2c2018-05-18 16:31:43 +0100485 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32)));
486
telsoa014fcda012018-03-09 14:13:49 +0000487 auto outputHandle2 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[2]);
surmeh013537c2c2018-05-18 16:31:43 +0100488 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
telsoa014fcda012018-03-09 14:13:49 +0000489}
490
BOOST_AUTO_TEST_CASE(CreateSplitterMerger)
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the merger layer.
    // We tested that it is possible to specify the 0th output
    // of the splitter to be the 1st input to the merger, and the 1st output of the splitter to be 0th input
    // of the merger.

    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workloads =
        CreateSplitterMergerWorkloadTest<NeonSplitterWorkload, NeonMergerWorkload,
            DataType::Float32>(factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlMerger = std::move(workloads.second);

    //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::INeonTensorHandle* sOut0 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::INeonTensorHandle* sOut1 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::INeonTensorHandle* mIn0 = dynamic_cast<armnn::INeonTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
    armnn::INeonTensorHandle* mIn1 = dynamic_cast<armnn::INeonTensorHandle*>(wlMerger->GetData().m_Inputs[1]);

    // All four handles must exist (the dynamic_casts must not have failed).
    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

    // The cross-wiring requested above: splitter output 0 feeds merger input 1
    // and splitter output 1 feeds merger input 0 — checked by pointer identity.
    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);

    BOOST_TEST(validDataPointers);
}
525
BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
{
    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We created a splitter with two outputs. That each of those outputs is used by two different activation layers

    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    // Out-params filled in by the helper below: one splitter and the four
    // activation workloads consuming its two outputs (two consumers each).
    std::unique_ptr<NeonSplitterWorkload> wlSplitter;
    std::unique_ptr<NeonActivationWorkload> wlActiv0_0;
    std::unique_ptr<NeonActivationWorkload> wlActiv0_1;
    std::unique_ptr<NeonActivationWorkload> wlActiv1_0;
    std::unique_ptr<NeonActivationWorkload> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<NeonSplitterWorkload,
        NeonActivationWorkload, DataType::Float32>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1,
                                                   wlActiv1_0, wlActiv1_1);

    armnn::INeonTensorHandle* sOut0 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::INeonTensorHandle* sOut1 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::INeonTensorHandle* activ0_0Im = dynamic_cast<armnn::INeonTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::INeonTensorHandle* activ0_1Im = dynamic_cast<armnn::INeonTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::INeonTensorHandle* activ1_0Im = dynamic_cast<armnn::INeonTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::INeonTensorHandle* activ1_1Im = dynamic_cast<armnn::INeonTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);


    // All six handles must exist (the dynamic_casts must not have failed).
    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

    // Each splitter output must be shared (by pointer identity) with both of
    // its two activation consumers.
    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}
565
566BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsNeon)
567{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000568 NeonWorkloadFactory factory =
569 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
telsoa01c577f2c2018-08-31 09:22:23 +0100570 CreateMemCopyWorkloads<INeonTensorHandle>(factory);
telsoa014fcda012018-03-09 14:13:49 +0000571}
572
Matteo Martincighbcd3c852018-09-28 14:14:12 +0100573template <typename L2NormalizationWorkloadType, typename armnn::DataType DataType>
574static void NeonCreateL2NormalizationWorkloadTest(DataLayout dataLayout)
575{
Matteo Martincigh2400b6d2018-10-09 18:19:20 +0100576 Graph graph;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000577 NeonWorkloadFactory factory =
578 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
579
Matteo Martincigh2400b6d2018-10-09 18:19:20 +0100580 auto workload =
581 CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
Matteo Martincighbcd3c852018-09-28 14:14:12 +0100582
583 // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
584 L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
585 auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
586 auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
Matteo Martincigh2400b6d2018-10-09 18:19:20 +0100587
588 TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
589 TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
590 TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
591 TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
592
593 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
594 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
Matteo Martincighbcd3c852018-09-28 14:14:12 +0100595}
596
597#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
598BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload)
599{
600 NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
601}
602
603BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload)
604{
605 NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
606}
607#endif
608
609BOOST_AUTO_TEST_CASE(CreateL2NormalizationNchwWorkload)
610{
611 NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
612}
613
614BOOST_AUTO_TEST_CASE(CreateL2NormalizationNhwcWorkload)
615{
616 NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
617}
618
narpra015cdda352018-11-19 15:30:27 +0000619template <typename MergerWorkloadType, armnn::DataType DataType>
620static void NeonCreateMergerWorkloadTest(std::initializer_list<unsigned int> outputShape,
621 unsigned int concatAxis)
622{
623 Graph graph;
624 NeonWorkloadFactory factory =
625 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
626
627 auto workload = CreateMergerWorkloadTest<MergerWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
628
629 MergerQueueDescriptor queueDescriptor = workload->GetData();
630 auto inputHandle0 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
631 auto inputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[1]);
632 auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
633
634 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle0, TensorInfo({ 2, 3, 2, 5 }, DataType)));
635 BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({ 2, 3, 2, 5 }, DataType)));
636 BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
637}
638
639BOOST_AUTO_TEST_CASE(CreateMergerDim0Float32Workload)
640{
641 NeonCreateMergerWorkloadTest<NeonMergerWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
642}
643
644BOOST_AUTO_TEST_CASE(CreateMergerDim1Float32Workload)
645{
646 NeonCreateMergerWorkloadTest<NeonMergerWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
647}
648
649BOOST_AUTO_TEST_CASE(CreateMergerDim3Float32Workload)
650{
651 NeonCreateMergerWorkloadTest<NeonMergerWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
652}
653
narpra0163b08822018-11-20 11:29:12 +0000654BOOST_AUTO_TEST_CASE(CreateMergerDim0Uint8Workload)
655{
656 NeonCreateMergerWorkloadTest<NeonMergerWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
657}
658
659BOOST_AUTO_TEST_CASE(CreateMergerDim1Uint8Workload)
660{
661 NeonCreateMergerWorkloadTest<NeonMergerWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
662}
663
664BOOST_AUTO_TEST_CASE(CreateMergerDim3Uint8Workload)
665{
666 NeonCreateMergerWorkloadTest<NeonMergerWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
667}
668
telsoa014fcda012018-03-09 14:13:49 +0000669BOOST_AUTO_TEST_SUITE_END()