//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonWorkloadFactoryHelper.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>

#include <aclCommon/test/CreateWorkloadClNeon.hpp>

#include <neon/NeonWorkloadFactory.hpp>
#include <neon/NeonTensorHandle.hpp>
#include <neon/workloads/NeonWorkloadUtils.hpp>
#include <neon/workloads/NeonWorkloads.hpp>

#include <doctest/doctest.h>

TEST_SUITE("CreateWorkloadNeon")
{
namespace
{

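// Shape-only helper: wraps CompareTensorHandleShape so the tests below can compare an ACL
// tensor handle's dimensions against an expected initializer list and get a PredicateResult back.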
armnn::PredicateResult CompareIAclTensorHandleShape(IAclTensorHandle* tensorHandle,
                                                    std::initializer_list<unsigned int> expectedDimensions)
{
    return CompareTensorHandleShape<IAclTensorHandle>(tensorHandle, expectedDimensions);
}

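// Full tensor-info helper: converts the expected armnn::TensorInfo to an ACL TensorInfo and
// returns true only if the data type, number of dimensions, quantization info and every
// dimension size match the info held by the handle.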
bool TestNeonTensorHandleInfo(armnn::IAclTensorHandle* handle, const armnn::TensorInfo& expectedInfo)
{
    using namespace armnn::armcomputetensorutils;

    const arm_compute::ITensorInfo* handleInfo = handle->GetTensor().info();
    const arm_compute::TensorInfo expectedAclInfo = BuildArmComputeTensorInfo(expectedInfo);

    if (handleInfo->data_type() != expectedAclInfo.data_type())
    {
        return false;
    }

    if (handleInfo->num_dimensions() != expectedAclInfo.num_dimensions())
    {
        return false;
    }

    if (handleInfo->quantization_info() != expectedAclInfo.quantization_info())
    {
        return false;
    }

    for (std::size_t d = 0; d < expectedAclInfo.num_dimensions(); ++d)
    {
        if (handleInfo->dimension(d) != expectedAclInfo.dimension(d))
        {
            return false;
        }
    }

    return true;
}

} // namespace

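// Each NeonCreate*WorkloadTest helper below follows the same pattern: build a minimal graph via
// the corresponding Create*WorkloadTest fixture, create the workload through the Neon factory,
// then downcast the queue descriptor's input/output handles and check that they carry the tensor
// info the fixture set up.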
template <typename armnn::DataType DataType>
static void NeonCreateActivationWorkloadTest()
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateActivationWorkloadTest<NeonActivationWorkload, DataType>(factory, graph);

    // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest).
    ActivationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType)));
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateActivationFloat16Workload")
{
    NeonCreateActivationWorkloadTest<DataType::Float16>();
}
#endif

TEST_CASE("CreateActivationFloatWorkload")
{
    NeonCreateActivationWorkloadTest<DataType::Float32>();
}

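// Shared helper for the arithmetic workloads (Addition, Subtraction, Multiplication, Division):
// the workload, descriptor and layer types are template parameters, and both inputs and the
// output are expected to have shape {2, 3}.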
template <typename WorkloadType,
          typename DescriptorType,
          typename LayerType,
          armnn::DataType DataType>
static void NeonCreateElementwiseWorkloadTest()
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);

    DescriptorType queueDescriptor = workload->GetData();
    auto inputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto inputHandle2 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
    CHECK(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType)));
    CHECK(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateAdditionFloat16Workload")
{
    NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
                                      AdditionQueueDescriptor,
                                      AdditionLayer,
                                      DataType::Float16>();
}
#endif

TEST_CASE("CreateAdditionFloatWorkload")
{
    NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
                                      AdditionQueueDescriptor,
                                      AdditionLayer,
                                      DataType::Float32>();
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateSubtractionFloat16Workload")
{
    NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
                                      SubtractionQueueDescriptor,
                                      SubtractionLayer,
                                      DataType::Float16>();
}
#endif

TEST_CASE("CreateSubtractionFloatWorkload")
{
    NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
                                      SubtractionQueueDescriptor,
                                      SubtractionLayer,
                                      DataType::Float32>();
}

TEST_CASE("CreateSubtractionUint8Workload")
{
    NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
                                      SubtractionQueueDescriptor,
                                      SubtractionLayer,
                                      DataType::QAsymmU8>();
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateMultiplicationFloat16Workload")
{
    NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
                                      MultiplicationQueueDescriptor,
                                      MultiplicationLayer,
                                      DataType::Float16>();
}
#endif

TEST_CASE("CreateMultiplicationFloatWorkload")
{
    NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
                                      MultiplicationQueueDescriptor,
                                      MultiplicationLayer,
                                      DataType::Float32>();
}

TEST_CASE("CreateMultiplicationUint8Workload")
{
    NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
                                      MultiplicationQueueDescriptor,
                                      MultiplicationLayer,
                                      DataType::QAsymmU8>();
}

TEST_CASE("CreateDivisionFloatWorkloadTest")
{
    NeonCreateElementwiseWorkloadTest<NeonDivisionWorkload,
                                      DivisionQueueDescriptor,
                                      DivisionLayer,
                                      armnn::DataType::Float32>();
}

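// Batch normalization: the expected 4D shape depends on the data layout,
// {2, 3, 4, 4} for NCHW and {2, 4, 4, 3} for NHWC.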
template <typename BatchNormalizationWorkloadType, typename armnn::DataType DataType>
static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>
                    (factory, graph, dataLayout);

    // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
    BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);

    TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};

    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateBatchNormalizationFloat16NchwWorkload")
{
    NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float16>(DataLayout::NCHW);
}

TEST_CASE("CreateBatchNormalizationFloat16NhwcWorkload")
{
    NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float16>(DataLayout::NHWC);
}
#endif

TEST_CASE("CreateBatchNormalizationFloatNchwWorkload")
{
    NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float32>(DataLayout::NCHW);
}

TEST_CASE("CreateBatchNormalizationFloatNhwcWorkload")
{
    NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float32>(DataLayout::NHWC);
}

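// Convolution2d: input {2, 3, 8, 16} and output {2, 2, 2, 10} for NCHW, or the NHWC-permuted
// equivalents {2, 8, 16, 3} and {2, 2, 10, 2}.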
template <typename armnn::DataType DataType>
static void NeonCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateConvolution2dWorkloadTest<NeonConvolution2dWorkload, DataType>(factory, graph, dataLayout);

    TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};

    // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateConvolution2dFloat16NchwWorkload")
{
    NeonCreateConvolution2dWorkloadTest<DataType::Float16>();
}

TEST_CASE("CreateConvolution2dFloat16NhwcWorkload")
{
    NeonCreateConvolution2dWorkloadTest<DataType::Float16>(DataLayout::NHWC);
}

#endif
TEST_CASE("CreateConvolution2dFloatNchwWorkload")
{
    NeonCreateConvolution2dWorkloadTest<DataType::Float32>();
}

TEST_CASE("CreateConvolution2dFloatNhwcWorkload")
{
    NeonCreateConvolution2dWorkloadTest<DataType::Float32>(DataLayout::NHWC);
}

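// With the CpuAcc "FastMathEnabled" backend option set, the factory is expected to select the
// Winograd convolution method for this Float32 NCHW convolution.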
TEST_CASE("CreateConvolution2dFastMathEnabledWorkload")
{
    Graph graph;
    using ModelOptions = std::vector<BackendOptions>;
    ModelOptions modelOptions = {};
    BackendOptions cpuAcc("CpuAcc",
    {
        { "FastMathEnabled", true }
    });
    modelOptions.push_back(cpuAcc);
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager(), modelOptions);

    auto workload =
        CreateConvolution2dWorkloadFastMathTest<NeonConvolution2dWorkload, armnn::DataType::Float32>(factory,
                                                                                                     graph,
                                                                                                     DataLayout::NCHW,
                                                                                                     modelOptions);

    ARMNN_ASSERT(workload != nullptr);
    auto conv2dWorkload = PolymorphicDowncast<NeonConvolution2dWorkload*>(workload.get());
    IgnoreUnused(conv2dWorkload);
    ARMNN_ASSERT(conv2dWorkload != nullptr);
    ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
}

template <typename armnn::DataType DataType>
static void NeonCreateDepthWiseConvolutionWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateDepthwiseConvolution2dWorkloadTest<NeonDepthwiseConvolutionWorkload,
                                                             DataType>(factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest).
    DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);

    TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
                                                               : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
                                                               : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });

    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}

TEST_CASE("CreateDepthWiseConvolution2dFloat32NhwcWorkload")
{
    NeonCreateDepthWiseConvolutionWorkloadTest<DataType::Float32>(DataLayout::NHWC);
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateDepthWiseConvolution2dFloat16NhwcWorkload")
{
    NeonCreateDepthWiseConvolutionWorkloadTest<DataType::Float16>(DataLayout::NHWC);
}
#endif

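// Fully connected: input { 3, 1, 4, 5 } maps to output { 3, 7 }. For QAsymmU8 the fixture sets
// quantization scales of 1.0 on the input and 2.0 on the output; the other data types use 0.0.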
template <typename FullyConnectedWorkloadType, typename armnn::DataType DataType>
static void NeonCreateFullyConnectedWorkloadTest()
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
    FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);

    // The expected quantization scales depend on the data type (see CreateFullyConnectedWorkloadTest).
    float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
    float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType, outputQScale)));
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateFullyConnectedFloat16Workload")
{
    NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::Float16>();
}
#endif

TEST_CASE("CreateFullyConnectedFloatWorkload")
{
    NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::Float32>();
}

TEST_CASE("CreateFullyConnectedQAsymmU8Workload")
{
    NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::QAsymmU8>();
}

TEST_CASE("CreateFullyConnectedQAsymmS8Workload")
{
    NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::QAsymmS8>();
}

template <typename NormalizationWorkloadType, typename armnn::DataType DataType>
static void NeonCreateNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
    NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);

    TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};

    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateNormalizationFloat16NchwWorkload")
{
    NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
}

TEST_CASE("CreateNormalizationFloat16NhwcWorkload")
{
    NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
}
#endif

TEST_CASE("CreateNormalizationFloatNchwWorkload")
{
    NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
}

TEST_CASE("CreateNormalizationFloatNhwcWorkload")
{
    NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
}


template <typename armnn::DataType DataType>
static void NeonCreatePooling2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreatePooling2dWorkloadTest<NeonPooling2dWorkload, DataType>(factory, graph, dataLayout);

    TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 5, 5} : TensorShape{3, 5, 5, 2};
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 2, 4} : TensorShape{3, 2, 4, 2};

    // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
    Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreatePooling2dFloat16Workload")
{
    NeonCreatePooling2dWorkloadTest<DataType::Float16>();
}
#endif

TEST_CASE("CreatePooling2dFloatNchwWorkload")
{
    NeonCreatePooling2dWorkloadTest<DataType::Float32>(DataLayout::NCHW);
}

TEST_CASE("CreatePooling2dFloatNhwcWorkload")
{
    NeonCreatePooling2dWorkloadTest<DataType::Float32>(DataLayout::NHWC);
}

TEST_CASE("CreatePooling2dUint8NchwWorkload")
{
    NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NCHW);
}

TEST_CASE("CreatePooling2dUint8NhwcWorkload")
{
    NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NHWC);
}

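// PReLU takes an explicit alpha tensor as a second input, so the input, alpha and output shapes
// are passed in by each test case rather than fixed in the helper.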
static void NeonCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
                                        const armnn::TensorShape& alphaShape,
                                        const armnn::TensorShape& outputShape,
                                        armnn::DataType dataType)
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreatePreluWorkloadTest<NeonPreluWorkload>(factory,
                                                               graph,
                                                               inputShape,
                                                               alphaShape,
                                                               outputShape,
                                                               dataType);

    // Checks that outputs and inputs are as we expect them (see definition of CreatePreluWorkloadTest).
    PreluQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto alphaHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, dataType)));
    CHECK(TestNeonTensorHandleInfo(alphaHandle, TensorInfo(alphaShape, dataType)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, dataType)));
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreatePreluFloat16Workload")
{
    NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float16);
}
#endif

TEST_CASE("CreatePreluFloatWorkload")
{
    NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float32);
}

TEST_CASE("CreatePreluUint8Workload")
{
    NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QAsymmU8);
}

template <typename armnn::DataType DataType>
static void NeonCreateReshapeWorkloadTest()
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateReshapeWorkloadTest<NeonReshapeWorkload, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
    ReshapeQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType)));
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateReshapeFloat16Workload")
{
    NeonCreateReshapeWorkloadTest<DataType::Float16>();
}
#endif

TEST_CASE("CreateReshapeFloatWorkload")
{
    NeonCreateReshapeWorkloadTest<DataType::Float32>();
}

TEST_CASE("CreateReshapeUint8Workload")
{
    NeonCreateReshapeWorkloadTest<DataType::QAsymmU8>();
}

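// Resize checks shapes only (via CompareIAclTensorHandleShape) rather than the full tensor info:
// a 4x4 spatial input is resized down to 2x2, with the expected shapes depending on the layout.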
template <typename ResizeWorkloadType, armnn::DataType DataType>
static void NeonCreateResizeWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
    auto workload = CreateResizeBilinearWorkloadTest<ResizeWorkloadType, DataType>(factory, graph, dataLayout);

    auto queueDescriptor = workload->GetData();

    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);

    armnn::PredicateResult predResult(true);
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
            predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 });
            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
            break;
        default: // DataLayout::NCHW
            predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
            predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 });
            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
    }
}

TEST_CASE("CreateResizeFloat32NchwWorkload")
{
    NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

TEST_CASE("CreateResizeUint8NchwWorkload")
{
    NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

TEST_CASE("CreateResizeFloat32NhwcWorkload")
{
    NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

TEST_CASE("CreateResizeUint8NhwcWorkload")
{
    NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}

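// Softmax: for the quantized data types the expected TensorInfo is set up with a scale of 1/256
// and a zero point of 0 (QAsymmU8) or -128 (QAsymmS8) before the handle checks.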
template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
static void NeonCreateSoftmaxWorkloadTest()
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
    SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
    armnn::TensorInfo tensorInfo({4, 1}, DataType);
    if (DataType == armnn::DataType::QAsymmU8)
    {
        tensorInfo.SetQuantizationOffset(0);
        tensorInfo.SetQuantizationScale(1.f / 256);
    }
    else if (DataType == armnn::DataType::QAsymmS8)
    {
        tensorInfo.SetQuantizationOffset(-128);
        tensorInfo.SetQuantizationScale(1.f / 256);
    }
    CHECK(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
    CHECK(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateSoftmaxFloat16Workload")
{
    NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::Float16>();
}
#endif

TEST_CASE("CreateSoftmaxFloatWorkload")
{
    NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::Float32>();
}

TEST_CASE("CreateSoftmaxQAsymmU8Workload")
{
    NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::QAsymmU8>();
}

TEST_CASE("CreateSoftmaxQAsymmS8Workload")
{
    NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::QAsymmS8>();
}

template <typename SpaceToDepthWorkloadType, typename armnn::DataType DataType>
static void NeonSpaceToDepthWorkloadTest()
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateSpaceToDepthWorkloadTest<SpaceToDepthWorkloadType, DataType>(factory, graph);

    SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);

    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 1, 2, 2, 1 }, DataType)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 1, 1, 1, 4 }, DataType)));
}

TEST_CASE("CreateSpaceToDepthFloat32Workload")
{
    NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::Float32>();
}

TEST_CASE("CreateSpaceToDepthFloat16Workload")
{
    NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::Float16>();
}

TEST_CASE("CreateSpaceToDepthQAsymm8Workload")
{
    NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
}

TEST_CASE("CreateSpaceToDepthQSymm16Workload")
{
    NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
}

TEST_CASE("CreateSplitterWorkload")
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateSplitterWorkloadTest<NeonSplitterWorkload, DataType::Float32>(factory, graph);

    // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32)));

    auto outputHandle0 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
    CHECK(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32)));

    auto outputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
    CHECK(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32)));

    auto outputHandle2 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
    CHECK(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
}

TEST_CASE("CreateSplitterConcat")
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the concat layer.
    // We test that it is possible to specify the 0th output of the splitter to be the 1st input
    // to the concat, and the 1st output of the splitter to be the 0th input of the concat.

    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workloads =
        CreateSplitterConcatWorkloadTest<NeonSplitterWorkload, NeonConcatWorkload,
            DataType::Float32>(factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlConcat   = std::move(workloads.second);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::IAclTensorHandle* sOut0 = dynamic_cast<armnn::IAclTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::IAclTensorHandle* sOut1 = dynamic_cast<armnn::IAclTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::IAclTensorHandle* mIn0  = dynamic_cast<armnn::IAclTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
    armnn::IAclTensorHandle* mIn1  = dynamic_cast<armnn::IAclTensorHandle*>(wlConcat->GetData().m_Inputs[1]);

    CHECK(sOut0);
    CHECK(sOut1);
    CHECK(mIn0);
    CHECK(mIn1);

    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);

    CHECK(validDataPointers);
}

TEST_CASE("CreateSingleOutputMultipleInputs")
{
    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We create a splitter with two outputs and check that each of those outputs is used by two different
    // activation layers.

    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    std::unique_ptr<NeonSplitterWorkload> wlSplitter;
    std::unique_ptr<NeonActivationWorkload> wlActiv0_0;
    std::unique_ptr<NeonActivationWorkload> wlActiv0_1;
    std::unique_ptr<NeonActivationWorkload> wlActiv1_0;
    std::unique_ptr<NeonActivationWorkload> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<NeonSplitterWorkload,
        NeonActivationWorkload, DataType::Float32>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1,
        wlActiv1_0, wlActiv1_1);

    armnn::IAclTensorHandle* sOut0 = dynamic_cast<armnn::IAclTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::IAclTensorHandle* sOut1 = dynamic_cast<armnn::IAclTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::IAclTensorHandle* activ0_0Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::IAclTensorHandle* activ0_1Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::IAclTensorHandle* activ1_0Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::IAclTensorHandle* activ1_1Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);

    CHECK(sOut0);
    CHECK(sOut1);
    CHECK(activ0_0Im);
    CHECK(activ0_1Im);
    CHECK(activ1_0Im);
    CHECK(activ1_1Im);

    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    CHECK(validDataPointers);
}

#if defined(ARMNNREF_ENABLED)

// This test needs the reference backend; it is not available if the reference backend has not been built.

TEST_CASE("CreateMemCopyWorkloadsNeon")
{
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
    CreateMemCopyWorkloads<IAclTensorHandle>(factory);
}

#endif

template <typename L2NormalizationWorkloadType, typename armnn::DataType DataType>
static void NeonCreateL2NormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload =
        CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    // Checks that inputs/outputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
    L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);

    TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ?
                TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
                TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };

    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateL2NormalizationFloat16NchwWorkload")
{
    NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
}

TEST_CASE("CreateL2NormalizationFloat16NhwcWorkload")
{
    NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
}
#endif

TEST_CASE("CreateL2NormalizationNchwWorkload")
{
    NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
}

TEST_CASE("CreateL2NormalizationNhwcWorkload")
{
    NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
}

template <typename LogSoftmaxWorkloadType, typename armnn::DataType DataType>
static void NeonCreateLogSoftmaxWorkloadTest()
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateLogSoftmaxWorkloadTest<LogSoftmaxWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateLogSoftmaxWorkloadTest).
    LogSoftmaxQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
    armnn::TensorInfo tensorInfo({4, 1}, DataType);

    CHECK(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
    CHECK(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_CASE("CreateLogSoftmaxFloat16Workload")
{
    NeonCreateLogSoftmaxWorkloadTest<NeonLogSoftmaxWorkload, DataType::Float16>();
}
#endif

TEST_CASE("CreateLogSoftmaxFloatWorkload")
{
    NeonCreateLogSoftmaxWorkloadTest<NeonLogSoftmaxWorkload, DataType::Float32>();
}

template <typename LstmWorkloadType>
static void NeonCreateLstmWorkloadTest()
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateLstmWorkloadTest<LstmWorkloadType>(factory, graph);

    LstmQueueDescriptor queueDescriptor = workload->GetData();

    auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);

    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 2, 2 }, DataType::Float32)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 2, 4 }, DataType::Float32)));
}

TEST_CASE("CreateLSTMWorkloadFloatWorkload")
{
    NeonCreateLstmWorkloadTest<NeonLstmFloatWorkload>();
}

template <typename ConcatWorkloadType, armnn::DataType DataType>
static void NeonCreateConcatWorkloadTest(std::initializer_list<unsigned int> outputShape,
                                         unsigned int concatAxis)
{
    Graph graph;
    NeonWorkloadFactory factory =
        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());

    auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);

    ConcatQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle0 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto inputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
    auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);

    CHECK(TestNeonTensorHandleInfo(inputHandle0, TensorInfo({ 2, 3, 2, 5 }, DataType)));
    CHECK(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({ 2, 3, 2, 5 }, DataType)));
    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
}

TEST_CASE("CreateConcatDim0Float32Workload")
{
    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}

TEST_CASE("CreateConcatDim1Float32Workload")
{
    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}

TEST_CASE("CreateConcatDim3Float32Workload")
{
    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}

TEST_CASE("CreateConcatDim0Uint8Workload")
{
    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}

TEST_CASE("CreateConcatDim1Uint8Workload")
{
    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}

TEST_CASE("CreateConcatDim3Uint8Workload")
{
    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}

Matthew Jackson87f65ea2019-08-01 10:01:34 +0100954template <armnn::DataType DataType>
955static void NeonCreateStackWorkloadTest(const std::initializer_list<unsigned int>& inputShape,
956 const std::initializer_list<unsigned int>& outputShape,
957 unsigned int axis,
958 unsigned int numInputs)
959{
960 armnn::Graph graph;
961 NeonWorkloadFactory factory =
962 NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
963
964 auto workload = CreateStackWorkloadTest<NeonStackWorkload, DataType>(factory,
965 graph,
966 TensorShape(inputShape),
967 TensorShape(outputShape),
968 axis,
969 numInputs);
970
971 // Check inputs and output are as expected
972 StackQueueDescriptor queueDescriptor = workload->GetData();
973 for (unsigned int i = 0; i < numInputs; ++i)
974 {
Jan Eilersbb446e52020-04-02 13:56:54 +0100975 auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[i]);
Sadik Armagan1625efc2021-06-10 18:24:34 +0100976 CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
Matthew Jackson87f65ea2019-08-01 10:01:34 +0100977 }
Jan Eilersbb446e52020-04-02 13:56:54 +0100978 auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
Sadik Armagan1625efc2021-06-10 18:24:34 +0100979 CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
Matthew Jackson87f65ea2019-08-01 10:01:34 +0100980}
981
Sadik Armagan1625efc2021-06-10 18:24:34 +0100982TEST_CASE("CreateStackFloat32Workload")
Matthew Jackson87f65ea2019-08-01 10:01:34 +0100983{
984 NeonCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
985}
986
Matthew Jacksone69c3992019-09-09 14:31:21 +0100987#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
Sadik Armagan1625efc2021-06-10 18:24:34 +0100988TEST_CASE("CreateStackFloat16Workload")
Matthew Jacksone69c3992019-09-09 14:31:21 +0100989{
990 NeonCreateStackWorkloadTest<armnn::DataType::Float16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
991}
992#endif
993
Sadik Armagan1625efc2021-06-10 18:24:34 +0100994TEST_CASE("CreateStackUint8Workload")
Matthew Jackson87f65ea2019-08-01 10:01:34 +0100995{
Derek Lambertif90c56d2020-01-10 17:14:08 +0000996 NeonCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
Matthew Jackson87f65ea2019-08-01 10:01:34 +0100997}
998
Francis Murtagh4fc3c482019-08-02 13:20:54 +0100999template <typename QuantizedLstmWorkloadType>
1000static void NeonCreateQuantizedLstmWorkloadTest()
1001{
Francis Murtagh4fc3c482019-08-02 13:20:54 +01001002 Graph graph;
1003 NeonWorkloadFactory factory = NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
1004
1005 auto workload = CreateQuantizedLstmWorkloadTest<QuantizedLstmWorkloadType>(factory, graph);
1006
1007 QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
1008
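    // Checks that inputs/outputs are as we expect them (see definition of CreateQuantizedLstmWorkloadTest).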
Jan Eilersbb446e52020-04-02 13:56:54 +01001009 IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
Sadik Armagan1625efc2021-06-10 18:24:34 +01001010 CHECK((inputHandle->GetShape() == TensorShape({2, 2})));
1011 CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
Francis Murtagh4fc3c482019-08-02 13:20:54 +01001012
Jan Eilersbb446e52020-04-02 13:56:54 +01001013 IAclTensorHandle* cellStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
Sadik Armagan1625efc2021-06-10 18:24:34 +01001014 CHECK((cellStateInHandle->GetShape() == TensorShape({2, 4})));
1015 CHECK((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
Francis Murtagh4fc3c482019-08-02 13:20:54 +01001016
Jan Eilersbb446e52020-04-02 13:56:54 +01001017 IAclTensorHandle* outputStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[2]);
Sadik Armagan1625efc2021-06-10 18:24:34 +01001018 CHECK((outputStateInHandle->GetShape() == TensorShape({2, 4})));
1019 CHECK((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
Francis Murtagh4fc3c482019-08-02 13:20:54 +01001020
Jan Eilersbb446e52020-04-02 13:56:54 +01001021 IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
Sadik Armagan1625efc2021-06-10 18:24:34 +01001022 CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
1023 CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
Francis Murtagh4fc3c482019-08-02 13:20:54 +01001024
Jan Eilersbb446e52020-04-02 13:56:54 +01001025 IAclTensorHandle* outputStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
Sadik Armagan1625efc2021-06-10 18:24:34 +01001026 CHECK((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
1027 CHECK((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
Francis Murtagh4fc3c482019-08-02 13:20:54 +01001028}
1029
Sadik Armagan1625efc2021-06-10 18:24:34 +01001030TEST_CASE("CreateQuantizedLstmWorkload")
Francis Murtagh4fc3c482019-08-02 13:20:54 +01001031{
1032 NeonCreateQuantizedLstmWorkloadTest<NeonQuantizedLstmWorkload>();
1033}
1034
James Conroycc340932020-05-12 18:08:52 +01001035template <typename QLstmWorkloadType>
1036static void NeonCreateQLstmWorkloadTest()
1037{
1038 Graph graph;
1039 NeonWorkloadFactory factory = NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
1040
1041 auto workload = CreateQLstmWorkloadTest<QLstmWorkloadType>(factory, graph);
1042 QLstmQueueDescriptor queueDescriptor = workload->GetData();
1043
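    // Checks that inputs/outputs are as we expect them (see definition of CreateQLstmWorkloadTest).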
1044 IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
Sadik Armagan1625efc2021-06-10 18:24:34 +01001045 CHECK((inputHandle->GetShape() == TensorShape({2, 4})));
1046 CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
James Conroycc340932020-05-12 18:08:52 +01001047
1048 IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
Sadik Armagan1625efc2021-06-10 18:24:34 +01001049 CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
1050 CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
James Conroycc340932020-05-12 18:08:52 +01001051
1052 IAclTensorHandle* outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
Sadik Armagan1625efc2021-06-10 18:24:34 +01001053 CHECK((outputHandle->GetShape() == TensorShape({2, 4})));
1054 CHECK((outputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
James Conroycc340932020-05-12 18:08:52 +01001055}
1056
Sadik Armagan1625efc2021-06-10 18:24:34 +01001057TEST_CASE("CreateQLstmWorkloadTest")
James Conroycc340932020-05-12 18:08:52 +01001058{
1059 NeonCreateQLstmWorkloadTest<NeonQLstmWorkload>();
1060}
1061
Teresa Charlin98b0dcb2022-01-18 22:09:29 +00001062template <armnn::DataType DataType>
1063static void NeonCreateActivationWorkloadReplaceFunctionsTest()
1064{
1065 shared_ptr<NeonMemoryManager> memoryManager = make_shared<NeonMemoryManager>();
1066
1067 Graph graph;
1068 NeonWorkloadFactory factory = NeonWorkloadFactoryHelper::GetFactory(memoryManager);
1069    // Input and output are created as armnn::TensorInfo tensorInfo({1, 1}, DataType) (see CreateActivationWorkloadTest)
1070 auto workloadPtr = CreateActivationWorkloadTest<NeonActivationWorkload, DataType>(factory, graph);
1071
1072    // New input and output tensor handles are created and then replaced in the workload
1073 const NeonTensorHandleFactory tensorHandleFactory(memoryManager);
1074    TensorInfo inputInfo({2, 2}, DataType::Float16);
1075    TensorInfo outputInfo({2, 2}, DataType::Float16);
1076 unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
1077 inputHandle->Allocate();
1078 unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
1079 outputHandle->Allocate();
1080
1081 unsigned int slot = 0;
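    // Replacing the input/output tensor handles is expected to be unimplemented for this workload,
    // so both calls should throw UnimplementedException.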
1082 CHECK_THROWS_AS(workloadPtr->ReplaceInputTensorHandle(inputHandle.get(), slot), UnimplementedException);
1083 CHECK_THROWS_AS(workloadPtr->ReplaceOutputTensorHandle(outputHandle.get(), slot), UnimplementedException);
1084}
1085
1086TEST_CASE("NeonReplaceFunctionsfromFloat32toFloat16ActivationWorkload")
1087{
1088 NeonCreateActivationWorkloadReplaceFunctionsTest<armnn::DataType::Float32>();
1089}
1090
1091TEST_CASE("NeonReplaceFunctionsfromUint8toFloat16ActivationWorkload")
1092{
1093 NeonCreateActivationWorkloadReplaceFunctionsTest<armnn::DataType::QAsymmU8>();
1094}
1095
Sadik Armagan1625efc2021-06-10 18:24:34 +01001096}