blob: 5c41d3aac3a454e191b04c195328e2d4e492d8e0 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000033#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000034#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000035#include "SpaceToBatchNdTestImpl.hpp"
Keith Davisa57eccb2019-06-14 17:33:22 +010036#include "SpaceToDepthTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
41#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010042#include "LstmTestImpl.hpp"
43#include "ConvertFp16ToFp32TestImpl.hpp"
44#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000045#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000046#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010047#include "QuantizeTestImpl.hpp"
Aron Virginas-Tar735a4502019-06-26 15:02:47 +010048#include "TransposeConvolution2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000049
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Layout of the flat data is channel-major (3 channels x 8 rows x 16 columns):
//   channel 0: mostly 0.5f with one all-zero row (row 1),
//   channel 1: zeros except for a vertical stripe of 1s in column 2,
//   channel 2: all -1.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
77
// 2-channel bias used by a number of Conv2d tests (one bias value per output channel).
static std::vector<float> Bias2({0, 2});
80
telsoa01c577f2c2018-08-31 09:22:23 +010081// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000082template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +010083boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +000084{
85 if(biasEnabled)
86 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000087 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +010088 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +000089 return bias;
90 }
91 else
92 {
93 return boost::multi_array<T, 1>();
94 }
95}
96
// Runs a 2-output-channel Conv2d over the shared 3x8x16 input with 3x5 kernels
// and compares against a hand-computed expected output.
// qScale/qOffset quantize input, kernel and output; bias (optional) is
// quantized with qScale * qScale as passed to GetBias2 below.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Output channel 0: responds to channel 0 (+), ignores channel 1, doubles channel 2.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Output channel 1: responds to channel 1 only.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // Delegate to the shared implementation; layout controls NCHW/NHWC handling
    // inside the impl (data above is authored NCHW-style).
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
180
// Same structure as SimpleConvolution2d3x5TestCommon but with 3x3 kernels,
// which exercises ArmCompute's direct convolution path.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Output channel 0: channel 0 (+, with a -1 centre), channel 1 ignored, channel 2 doubled.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Output channel 1: responds to channel 1 only.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // Bias (optional) is quantized with qScale * qScale, matching the
    // input-scale * weight-scale convention used by the impl.
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
257
// Small NHWC convolution: 3x4 single-channel input, one 3x3 kernel, no bias.
// NOTE(review): biasEnabled is accepted but not used — an empty bias is always
// passed to the impl below; confirm whether that is intentional.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Input is a single-batch 3x4 1-channel image in NHWC order: {1, 3, 4, 1}.
    // (Values are used unquantized here.)
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                          1, 5, 2, 3,
                                                          8, 7, 3, 6,
                                                          3, 3, 9, 1
                                                      });


    // Single 3x3 1-channel kernel (NHWC-shaped {1, 3, 3, 1}).
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
                                                                       4, 5, 6,
                                                                       0, 0, 0,
                                                                       3, 2, 1
                                                                   });

    // Expected output is 1 batch of a 3x4 1-channel image.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // Empty bias; qScale/qOffset are forwarded for the impl's quantization handling.
    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset);
}
309
// NHWC convolution with 2x2 stride and symmetric 1-pixel padding:
// 5x5 input, 3x3 kernel, producing a 3x3 output. No bias is applied.
// NOTE(review): biasEnabled is accepted but not used — an empty bias is always
// passed to the impl below; confirm whether that is intentional.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image (NHWC shape {1, 5, 5, 1}).
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                          1, 5, 2, 3, 5,
                                                          8, 7, 3, 6, 3,
                                                          3, 3, 9, 1, 9,
                                                          4, 1, 8, 1, 3,
                                                          6, 8, 1, 9, 2
                                                      });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
                                                       {
                                                           4, 5, 6,
                                                           0, 0, 0,
                                                           3, 2, 1
                                                       });

    // Expected output is a single-batch, 1 channel, 3x3 image.
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
    {
        23, 33, 24,
        91, 99, 48,
        26, 50, 19
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // SAME-style padding of 1 on every side; stride 2 in both dimensions.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX = 2;
    uint32_t strideY = 2;

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
375
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000376LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
377 armnn::IWorkloadFactory& workloadFactory,
378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
379 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000380 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000381{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000382 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
383 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000384}
385
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000386LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
387 armnn::IWorkloadFactory& workloadFactory,
388 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
389 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000390 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000391{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000392 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
393 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000394}
395
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000396LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
397 armnn::IWorkloadFactory& workloadFactory,
398 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
399 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000400 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000401{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000402 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
403 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000404}
405
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000406LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
407 armnn::IWorkloadFactory& workloadFactory,
408 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
409 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100410{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000411 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
412 workloadFactory,
413 memoryManager,
414 0.f,
415 0,
416 biasEnabled,
417 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100418}
419
Mike Kelly7332ed82018-12-20 17:03:06 +0000420LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
421 armnn::IWorkloadFactory& workloadFactory,
422 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
423 bool biasEnabled,
424 const armnn::DataLayout layout)
425{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000426 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
427 workloadFactory,
428 memoryManager,
429 0.f,
430 0,
431 biasEnabled,
432 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000433}
434
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000435LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
436 armnn::IWorkloadFactory& workloadFactory,
437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
438 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000439 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000440{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000441 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
442 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000443}
444
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100445LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
446 armnn::IWorkloadFactory& workloadFactory,
447 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
448 bool biasEnabled,
449 const armnn::DataLayout layout)
450{
451return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
452 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
453}
454
455LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
456 armnn::IWorkloadFactory& workloadFactory,
457 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
458 bool biasEnabled,
459 const armnn::DataLayout layout)
460{
461 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
462 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
463}
464
// Convolution with asymmetric padding larger than half the kernel size
// (2x2 kernel, padding L/T/R/B = 1/2/3/4), verified against a fully
// hand-computed expected output. No bias is applied.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
               0,    0,    0,    0,    0,    0,
            -242, -594, -934, -372,    0,    0,
            -495, -1190, -1850, -725,    0,    0,
            -538, -1256, -1916, -748,    0,    0,
            -273, -626, -946, -363,    0,    0,
               0,    0,    0,    0,    0,    0,
               0,    0,    0,    0,    0,    0,
               0,    0,    0,    0,    0,    0
        })));

    // No bias (GetBias2 called with false); padding values exceed kernel/2.
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        2, // Padding top.
        3, // Padding right.
        4); // Padding bottom.
}
528
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000529template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
530 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000531LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
532 armnn::IWorkloadFactory& workloadFactory,
533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000534 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000535 float qScale,
536 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000537{
telsoa01c577f2c2018-08-31 09:22:23 +0100538 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000539 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000540 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
541 QuantizedVector<T>(qScale, qOffset, {
542 11,21,31,41,51,
543 12,22,32,42,52,
544 13,23,33,43,53,
545 14,24,34,44,54,
546 15,25,35,45,55,
547 })));
548
telsoa01c577f2c2018-08-31 09:22:23 +0100549 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000550 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000551 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
552 QuantizedVector<T>(qScale, qOffset, {
553 -11,-21,-31,-41,
554 -12,-22,-32,-42,
555 -13,-23,-33,-43,
556 -14,-24,-34,-44,
557 })));
558
telsoa01c577f2c2018-08-31 09:22:23 +0100559 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000560 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000561 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
562 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
563 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000564 -7140, -10580, -13940, -9300, -5230,
565 -9590, -14120, -18520, -12290, -6860,
566 -9980, -14560, -18960, -12560, -7000,
567 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100568 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000569 })));
570
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000571 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
572 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000573 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000574 input,
575 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100576 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000577 expectedOutput,
578 qScale,
579 qOffset,
narpra015f703182018-10-26 16:24:58 +0100580 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100581 1, // Padding left.
582 1, // Padding top.
583 2, // Padding right.
584 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100585}
586
Teresa Charlinedeeb162019-06-14 11:09:19 +0100587LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
588 armnn::IWorkloadFactory& workloadFactory,
589 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
590 armnn::DataLayout layout)
591{
592 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
593 workloadFactory, memoryManager, layout, 0.0f, 0);
594}
595
596LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
597 armnn::IWorkloadFactory& workloadFactory,
598 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
599 armnn::DataLayout layout)
600{
601 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
602 <armnn::DataType::Float32, armnn::DataType::Float32>(
603 workloadFactory, memoryManager, layout, 0.0f, 0);
604}
605
606LayerTestResult<float, 4> Convolution1dTest(
607 armnn::IWorkloadFactory& workloadFactory,
608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
609 bool biasEnabled)
610{
611 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
612 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
613}
614
615LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
616 armnn::IWorkloadFactory& workloadFactory,
617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
618 bool biasEnabled)
619{
620 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
621 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
622}
623
624LayerTestResult<float,4> CompareConvolution2dTest(
625 armnn::IWorkloadFactory& workloadFactory,
626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
627 armnn::IWorkloadFactory& refWorkloadFactory)
628{
629 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
630 workloadFactory, memoryManager, refWorkloadFactory);
631}
632
// Shared driver for the dilated-convolution tests: quantizes the supplied
// float input/kernel/expected-output data according to ArmnnType, stamps
// matching quantization parameters onto the (in/out) TensorInfos, and runs
// the convolution with zero padding and unit stride but the given dilation.
// Note: the TensorInfo parameters are mutated (quantization scale/offset set).
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const std::vector<float>& inputNoQuantizedValues,
    armnn::TensorInfo& inputTensorInfo,
    const std::vector<float>& kernelNoQuantizedValues,
    armnn::TensorInfo& kernelTensorInfo,
    const std::vector<float>& outputExpectedNoQuantizedValues,
    armnn::TensorInfo& outputTensorInfo,
    uint32_t dilationX,
    uint32_t dilationY,
    armnn::DataLayout layout = armnn::DataLayout::NCHW,
    bool biasEnabled = false
)
{
    // Pick quantization parameters appropriate for the data type under test.
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            // Float path: quantization is a no-op.
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    // Same scale/offset for input, kernel and output tensors.
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    // Quantize the caller-supplied float data into tensors of type T.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                   std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                     kernelTensorInfo.GetQuantizationOffset(),
                                                                     kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                             outputTensorInfo.GetQuantizationOffset(),
                                                                             outputExpectedNoQuantizedValues)));

    // No padding, unit stride — only the dilation parameters vary per test.
    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX  = 1;
    uint32_t strideY  = 1;

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
720
// Convolution of a 1-channel 10x10 input (a 3x3 block of ones in a zero
// background) with one 3x3 kernel at dilation rate 3x3. ArmnnType selects
// the tested data type, ArmnnBType the bias type; quantization parameters
// are chosen inside Convolution2d3x3DilationTestCommon.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Input: NCHW 1x1x10x10.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I-K+2P)/S +1 => (10-7+0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            3,  // dilationX
            3,  // dilationY
            layout,
            biasEnabled);
}
776
// Same pattern as Convolution2d3x3Dilation3x3Test but with a 2-channel input
// and a {1, 2, 3, 3} kernel: both input channels accumulate into the single
// output channel, so the expected values are double the 1-channel case.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Input: NCHW 1x2x10x10 — the same ones-block pattern in both channels.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One output channel over two input channels (identical 3x3 filters).
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I-K+2P)/S +1 => (10-7+0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        12., 10., 10., 10.,
         6., 4., 4., 4.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            3,  // dilationX
            3,  // dilationY
            layout,
            biasEnabled);
}
847
// Explicit instantiations of the dilated convolution tests for the data type /
// bias type combinations exercised by the backend unit tests.
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
889
// Depthwise convolution with asymmetric padding (left/top = 1, right/bottom = 2):
// single batch, 2-channel 5x5 input, depth multiplier 1 with a 2-channel 4x4
// kernel. qScale/qOffset are forwarded to the impl.
// NOTE(review): the input/kernel/output data is quantized with the TensorInfos'
// default quantization parameters (they are never set to qScale/qOffset here) —
// confirm this is intentional.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0, 1, 2, 3, 4,
             5, 6, 7, 8, 9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
970
// NHWC variant of the depthwise convolution test: same 2-channel 5x5 input,
// 2-channel 4x4 kernel, asymmetric padding (1,1,2,2) and unit strides as the
// NCHW test above, but the layout passed to the impl is fixed to NHWC.
// NOTE(review): data is quantized with the TensorInfos' default quantization
// parameters, not qScale/qOffset — confirm this is intentional.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
            0, 1, 2, 3, 4,
            5, 6, 7, 8, 9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected values match the NCHW asymmetric-padding test above.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
1048
// Dilated depthwise convolution in NHWC: a 9x9 single-channel input with a
// centred 3x3 block of ones, convolved with a 3x3 kernel at dilation 3x3,
// no padding and unit strides.
// NOTE(review): data is quantized with the TensorInfos' default quantization
// parameters, not qScale/qOffset — confirm this is intentional.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            1, 2, 3,
            4, 5, 6,
            7, 8, 9
        })));

    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX = 1;
    uint32_t strideY = 1;
    uint32_t dilationX = 3;
    uint32_t dilationY = 3;

    // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            5, 5, 5,
            5, 5, 5,
            5, 5, 5
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
1122
Teresa Charlin20b1f882019-06-19 09:34:37 +01001123
1124template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1125LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
1126 armnn::IWorkloadFactory& workloadFactory,
1127 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1128 const std::vector<float>& inputNoQuantizedValues,
1129 armnn::TensorInfo& inputTensorInfo,
1130 const std::vector<float>& kernelNoQuantizedValues,
1131 armnn::TensorInfo& kernelTensorInfo,
1132 const std::vector<float>& outputExpectedNoQuantizedValues,
1133 armnn::TensorInfo& outputTensorInfo,
1134 uint32_t dilationX,
1135 uint32_t dilationY,
1136 armnn::DataLayout layout = armnn::DataLayout::NCHW,
1137 bool biasEnabled = false)
1138{
1139 float qScale;
1140 int32_t qOffset;
1141 switch (ArmnnType)
1142 {
1143 case armnn::DataType::QuantisedAsymm8:
1144 {
1145 qScale = 0.1f;
1146 qOffset = 128;
1147 break;
1148 }
1149 case armnn::DataType::QuantisedSymm16:
1150 {
1151 qScale = 0.1f;
1152 qOffset = 0;
1153 break;
1154 }
1155 case armnn::DataType::Float32:
1156 default:
1157 {
1158 qScale = 0.f;
1159 qOffset = 0;
1160 break;
1161 }
1162 }
1163
1164 inputTensorInfo.SetQuantizationScale(qScale);
1165 inputTensorInfo.SetQuantizationOffset(qOffset);
1166 kernelTensorInfo.SetQuantizationScale(qScale);
1167 kernelTensorInfo.SetQuantizationOffset(qOffset);
1168 outputTensorInfo.SetQuantizationScale(qScale);
1169 outputTensorInfo.SetQuantizationOffset(qOffset);
1170
1171 auto input = MakeTensor<T, 4>(inputTensorInfo,
1172 std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
1173 inputTensorInfo.GetQuantizationOffset(),
1174 inputNoQuantizedValues)));
1175 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
1176 std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
1177 kernelTensorInfo.GetQuantizationOffset(),
1178 kernelNoQuantizedValues)));
1179 auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
1180 std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
1181 outputTensorInfo.GetQuantizationOffset(),
1182 outputExpectedNoQuantizedValues)));
1183
1184 uint32_t padLeft = 0;
1185 uint32_t padTop = 0;
1186 uint32_t padRight = 0;
1187 uint32_t padBottom = 0;
1188 uint32_t strideX = 1;
1189 uint32_t strideY = 1;
1190
1191 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1192 workloadFactory,
1193 memoryManager,
1194 input,
1195 kernel,
1196 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1197 expectedOutput,
1198 qScale,
1199 qOffset,
1200 layout,
1201 padLeft,
1202 padTop,
1203 padRight,
1204 padBottom,
1205 strideX,
1206 strideY,
1207 dilationX,
1208 dilationY);
1209}
1210
// Depthwise convolution of a 1-channel 10x10 input (a 3x3 block of ones in a
// zero background) with a 3x3 kernel at dilation rate 3x3. Quantization
// parameters are chosen inside DepthwiseConvolution2d3x3DilationTestCommon.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Input: NCHW 1x1x10x10.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I-K+2P)/S +1 => (10-7+0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            3,  // dilationX
            3,  // dilationY
            layout,
            biasEnabled);
}
1266
// Same pattern as DepthwiseConvolution2d3x3Dilation3x3Test but with two
// channels. Being depthwise, each input channel is filtered independently,
// so the output is {1, 2, 4, 4} with the 1-channel result repeated per channel.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Input: NCHW 1x2x10x10 — the same ones-block pattern in both channels.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One identical 3x3 filter per input channel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 2x4x4: (I-K+2P)/S +1 => (10-7+0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.,

        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            3,  // dilationX
            3,  // dilationY
            layout,
            biasEnabled);
}
1342
1343
// Explicit instantiations of the dilated depthwise convolution tests for the
// data type / bias type combinations exercised by the backend unit tests.
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
1385
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001386LayerTestResult<float, 4> DepthwiseConvolution2dTest(
1387 armnn::IWorkloadFactory& workloadFactory,
1388 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1389 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001390 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001391{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001392 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001393 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001394}
1395
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001396LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
1397 armnn::IWorkloadFactory& workloadFactory,
1398 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1399 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +01001400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001401 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1402 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001403}
1404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001405LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
1406 armnn::IWorkloadFactory& workloadFactory,
1407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1408 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001409 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001410{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001411 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001412 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001413}
1414
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001415LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1416 armnn::IWorkloadFactory& workloadFactory,
1417 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1418 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001419 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +01001420{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001421 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001422 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +01001423}
1424
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001425LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1426 armnn::IWorkloadFactory& workloadFactory,
1427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1428 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001429 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001430{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001431 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001432 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001433}
1434
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001435LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1436 armnn::IWorkloadFactory& workloadFactory,
1437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1438 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001439 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001440{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001441 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001442 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001443}
1444
Bruno Goncalves22972f02019-04-26 21:03:24 -03001445LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1446 armnn::IWorkloadFactory& workloadFactory,
1447 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1448{
1449 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001450 workloadFactory,
1451 memoryManager,
1452 0.f,
1453 0,
1454 false);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001455}
1456
Ruomei Yan88d44b82019-05-23 14:29:06 +01001457LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1458 armnn::IWorkloadFactory& workloadFactory,
1459 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1460 bool biasEnabled,
1461 const armnn::DataLayout layout)
1462{
1463 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1464 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1465}
1466
1467LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1468 armnn::IWorkloadFactory& workloadFactory,
1469 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1470 bool biasEnabled,
1471 const armnn::DataLayout layout)
1472{
1473 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1474 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1475}
1476
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001477LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001478 armnn::IWorkloadFactory& workloadFactory,
1479 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1480 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +00001481 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001482{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001483 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1484 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +00001485}
1486
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001487LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1488 armnn::IWorkloadFactory& workloadFactory,
1489 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1490 armnn::IWorkloadFactory& refWorkloadFactory,
1491 const armnn::DataLayout layout)
1492{
1493 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1494 workloadFactory, memoryManager, refWorkloadFactory, layout);
1495}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001496
1497LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1498 armnn::IWorkloadFactory& workloadFactory,
1499 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001500{
1501 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1502 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001503 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001504}
1505
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001506LayerTestResult<float,4> SimpleNormalizationWithinTest(
1507 armnn::IWorkloadFactory& workloadFactory,
1508 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001509{
1510 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1511 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001512 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001513}
1514
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001515LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1516 armnn::IWorkloadFactory& workloadFactory,
1517 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001518{
1519 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1520 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001521 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001522}
1523
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001524LayerTestResult<float,2> SimpleSoftmaxTest(
1525 armnn::IWorkloadFactory& workloadFactory,
1526 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1527 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001528{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001529 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001530}
1531
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001532LayerTestResult<float,3> Simple3dSoftmaxTest(
1533 armnn::IWorkloadFactory& workloadFactory,
1534 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1535 float beta)
1536{
1537 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1538}
1539
1540LayerTestResult<float,4> Simple4dSoftmaxTest(
1541 armnn::IWorkloadFactory& workloadFactory,
1542 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1543 float beta)
1544{
1545 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1546}
1547
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001548LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1549 armnn::IWorkloadFactory& workloadFactory,
1550 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1551 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001552{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001553 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001554}
1555
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001556LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1557 armnn::IWorkloadFactory& workloadFactory,
1558 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1559 float beta)
1560{
1561 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1562}
1563
1564LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1565 armnn::IWorkloadFactory& workloadFactory,
1566 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1567 float beta)
1568{
1569 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1570}
1571
nikraj01248683f2019-05-29 16:46:50 +01001572LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1573 armnn::IWorkloadFactory& workloadFactory,
1574 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1575 float beta)
1576{
1577 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1578}
1579
1580LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1581 armnn::IWorkloadFactory& workloadFactory,
1582 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1583 float beta)
1584{
1585 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1586}
1587
1588LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
1589 armnn::IWorkloadFactory& workloadFactory,
1590 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1591 float beta)
1592{
1593 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1594}
1595
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001596LayerTestResult<float,4> CompareNormalizationTest(
1597 armnn::IWorkloadFactory& workloadFactory,
1598 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1599 armnn::IWorkloadFactory& refWorkloadFactory,
1600 armnn::NormalizationAlgorithmChannel normChannel,
1601 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00001602{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001603 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001604}
1605
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001606LayerTestResult<float,2> CompareSoftmaxTest(
1607 armnn::IWorkloadFactory& workloadFactory,
1608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001609 armnn::IWorkloadFactory& refWorkloadFactory,
1610 float beta)
1611{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001612 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
1613 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001614}
1615
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001616LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
1617 armnn::IWorkloadFactory& workloadFactory,
1618 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001619 armnn::IWorkloadFactory& refWorkloadFactory,
1620 float beta)
1621{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001622 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
1623 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001624}
1625
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001626std::vector<LayerTestResult<float,3>> SplitterTest(
1627 armnn::IWorkloadFactory& workloadFactory,
1628 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001629{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001630 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00001631}
1632
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001633std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
1634 armnn::IWorkloadFactory& workloadFactory,
1635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001636{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001637 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001638}
1639
Ruomei Yan25339c32019-05-28 16:48:20 +01001640std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
1641 armnn::IWorkloadFactory& workloadFactory,
1642 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1643{
1644 return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1645}
1646
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001647LayerTestResult<float, 3> CopyViaSplitterTest(
1648 armnn::IWorkloadFactory& workloadFactory,
1649 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001650{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001651 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001652}
1653
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001654LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1655 armnn::IWorkloadFactory& workloadFactory,
1656 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001657{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001658 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001659}
1660
Ruomei Yan25339c32019-05-28 16:48:20 +01001661LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
1662 armnn::IWorkloadFactory& workloadFactory,
1663 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1664{
1665 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1666}
1667
Jan Eilers38e05bd2019-06-26 13:10:09 +01001668void LstmUtilsZeroVectorTest()
1669{
1670 armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
1671 boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
1672 {2., 3., 3., 4.}));
1673
1674 boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
1675 {0., 0., 0., 0.}));
1676
1677 return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
1678}
1679
1680void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
1681{
1682 uint32_t batchSize = 2;
1683 uint32_t vecSize = 4;
1684 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1685 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1686 { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0
1687 0.9f, 1.0f, 1.1f, 1.2f })); //batch 1
1688
1689 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1690 { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0
1691 -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f })); //batch 1
1692
1693 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1694 vecSize, batchSize, expectedOutput);
1695}
1696
1697void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
1698{
1699 uint32_t batchSize = 2;
1700 uint32_t vecSize = 4;
1701 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1702 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1703 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1704 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
1705
1706 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1707 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1708 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
1709
1710 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1711 vecSize, batchSize, expectedOutput);
1712}
1713
1714void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
1715{
1716 uint32_t batchSize = 2;
1717 uint32_t vecSize = 4;
1718 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1719 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1720 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1721 0.1f, 0.2f, 0.3f, 0.4f })); //batch 1
1722
1723 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1724 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1725 -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f })); //batch 1
1726
1727 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1728 vecSize, batchSize, expectedOutput);
1729}
1730
1731
// Checks VectorBatchVectorCwiseProduct: a single vector is multiplied
// element-wise into each row of a batch of vectors.
void LstmUtilsVectorBatchVectorCwiseProductTest()
{
    uint32_t batchSize = 4;
    uint32_t vecSize = 29;
    armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
    // Multiplier vector; last element is 0 to check the zero-product case.
    boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
        { 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
          11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
          21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}));

    armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
    // Four batches exercising every sign combination against `vector`:
    // same values, negated, alternating signs, and alternating signs inverted.
    boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
        { /* batch 0 */
          1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
          11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
          21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f,
          /* batch 1 */
          -1.1f, -2.2f, -3.3f, -4.4f, -5.5f, -6.6f, -7.7f, -8.8f, -9.9f, -10.1f,
          -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
          -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f, 0.0f,
          /* batch 2 */
          1.1f, -2.2f, 3.3f, -4.4f, 5.5f, -6.6f, 7.7f, -8.8f, 9.9f, -10.1f,
          11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
          21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f, 0.0f,
          /* batch 3 */
          -1.1f, 2.2f, -3.3f, 4.4f, -5.5f, 6.6f, -7.7f, 8.8f, -9.9f, 10.1f,
          -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
          -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f}));

    // Expect output[b][i] = vector[i] * batchVector[b][i] (element-wise product,
    // no accumulation — e.g. 1.1 * 1.1 = 1.21 in batch 0).
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
        { /* batch 0 */
          1.210000f, 4.840000f, 10.889999f, 19.360001f, 30.250000f, 43.559998f,
          59.289997f, 77.440002f, 98.009995f, 102.010010f, 123.432091f, 146.894394f,
          172.396896f, 199.939606f, 229.522491f, 261.145599f, 294.808899f, 330.512421f,
          368.256134f, 408.040039f, 449.864075f, 493.728363f, 539.632874f, 587.577576f,
          637.562500f, 689.587585f, 743.652954f, 799.758423f, 0.000000f,
          /* batch 1 */
          -1.210000f, -4.840000f, -10.889999f, -19.360001f, -30.250000f, -43.559998f,
          -59.289997f, -77.440002f, -98.009995f, -102.010010f, -123.432091f, -146.894394f,
          -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
          -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
          -637.562500f, -689.587585f, -743.652954f, -799.758423f, 0.000000f,
          /* batch 2 */
          1.210000f, -4.840000f, 10.889999f, -19.360001f, 30.250000f, -43.559998f,
          59.289997f, -77.440002f, 98.009995f, -102.010010f, 123.432091f, -146.894394f,
          172.396896f, -199.939606f, 229.522491f, -261.145599f, 294.808899f, -330.512421f,
          368.256134f, -408.040039f, 449.864075f, -493.728363f, 539.632874f, -587.577576f,
          637.562500f, -689.587585f, 743.652954f, -799.758423f, 0.000000f,
          /* batch 3 */
          -1.210000f, 4.840000f, -10.889999f, 19.360001f, -30.250000f, 43.559998f,
          -59.289997f, 77.440002f, -98.009995f, 102.010010f, -123.432091f, 146.894394f,
          -172.396896f, 199.939606f, -229.522491f, 261.145599f, -294.808899f, 330.512421f,
          -368.256134f, 408.040039f, -449.864075f, 493.728363f, -539.632874f, 587.577576f,
          -637.562500f, 689.587585f, -743.652954f, 799.758423f, 0.000000f}));

    return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
            vecSize, batchSize, expectedOutput);
}
1791
1792
1793void LstmUtilsVectorBatchVectorAddTest()
1794{
1795 uint32_t batchSize = 2;
1796 uint32_t vecSize = 3;
1797 armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
1798 boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
1799 { 0.0f, -0.5f, 1.0f}));
1800
1801 armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
1802 boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1803 { 1.0f, 2.0f, 3.0f, //batch 0
1804 4.0f, 5.0f, 6.0f})); //batch 1
1805
1806 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1807 { 1.0f, 1.5f, 4.0f,
1808 4.0f, 4.5f, 7.0f}));
1809
1810 return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
1811 vecSize, batchSize, expectedOutput);
1812}
1813
1814
// Float32 LSTM with coupled input-forget gate (CIFG) and peephole
// connections, no projection layer. Batch 2, input size 2, output size 4;
// the expected values are golden outputs checked by the impl.
LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            { 2., 3., 3., 4. }));

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1830
// Float32 LSTM with separate input/forget gates (no CIFG), peephole
// connections and a projection layer. Batch 2, input size 5, projected
// output size 16; expected values are golden outputs.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
             0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1852
// Basic Float32 LSTM: no CIFG, no peephole, no projection.
// Batch 2, input size 2, output size 4; expected values are golden outputs.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {2., 3., 3., 4.}));


    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
              -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1870
Jan Eilers38e05bd2019-06-26 13:10:09 +01001871
// Float32 LSTM with peephole, projection AND layer normalization
// (no CIFG). Batch 2, input size 5, projected output size 3.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.7f, 0.8f, 0.1f, 0.2f, 0.3f,     //batch 0
             0.3f, 0.2f, 0.9f, 0.8f, 0.1f}));  //batch 1

    armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            { 0.0244077f, 0.128027f, -0.00170918f,    //batch 0
             -0.00692428f, 0.0848741f, 0.063445f}));  //batch 1
    return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1888
1889
// QSymm16 variant of the basic LSTM test (no CIFG/peephole/projection).
// Activations are QSymm16; constant weights/biases use QAsymm8.
// Same golden float values as the Float32 test, quantised with scale 1, offset 0.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({2, 2}, datatype);
    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
            std::vector<float>{2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
                                          -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);

}
1913
// QSymm16 variant of the CIFG + peephole (no projection) LSTM test.
// Activations are QSymm16; constant weights/biases use QAsymm8.
LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
            std::vector<float>({ 2., 3., 3., 4. })));

    armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));

    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
1937
// QSymm16 variant of the peephole + projection (no CIFG) LSTM test.
// Uses scale 2 to cover a non-identity quantisation; constants are QAsymm8.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 2.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));

    armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));

    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
1967
// Same as LstmLayerInt16NoCifgNoPeepholeNoProjectionTest, but the constant
// weights/biases are also QSymm16 (instead of QAsymm8).
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16

    armnn::TensorInfo inputDesc({2, 2}, datatype);
    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>{2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
                                          -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));

    // Passing `datatype` as the constant type makes weights QSymm16 too.
    return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
}
1989
// Concatenates a 2-channel and a 1-channel CHW tensor along the channel
// dimension into a 3-channel output, using sub-tensor views when the
// backend supports them, and checks the merged result.
LayerTestResult<float,3> ConcatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // NOTE(review): memoryManager is unused here; the workload factory was
    // constructed with it by the caller.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors (CHW layout).
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    // Expected output: input1's two channels (1..36) followed by input2's
    // single channel (37..54).
    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
    {
        1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f,
        7.0f, 8.0f, 9.0f,
        10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f,
        16.0f, 17.0f, 18.0f,

        19.0f, 20.0f, 21.0f,
        22.0f, 23.0f, 24.0f,
        25.0f, 26.0f, 27.0f,
        28.0f, 29.0f, 30.0f,
        31.0f, 32.0f, 33.0f,
        34.0f, 35.0f, 36.0f,

        37.0f, 38.0f, 39.0f,
        40.0f, 41.0f, 42.0f,
        43.0f, 44.0f, 45.0f,
        46.0f, 47.0f, 48.0f,
        49.0f, 50.0f, 51.0f,
        52.0f, 53.0f, 54.0f,
    })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
    {
        1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f,
        7.0f, 8.0f, 9.0f,
        10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f,
        16.0f, 17.0f, 18.0f,

        19.0f, 20.0f, 21.0f,
        22.0f, 23.0f, 24.0f,
        25.0f, 26.0f, 27.0f,
        28.0f, 29.0f, 30.0f,
        31.0f, 32.0f, 33.0f,
        34.0f, 35.0f, 36.0f,
    })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
    {
        37.0f, 38.0f, 39.0f,
        40.0f, 41.0f, 42.0f,
        43.0f, 44.0f, 45.0f,
        46.0f, 47.0f, 48.0f,
        49.0f, 50.0f, 51.0f,
        52.0f, 53.0f, 54.0f,
    })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    // input[1] is placed starting at channel 2, directly after input[0]'s two channels.
    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // If the backend supports sub-tensors, the inputs are views directly into
    // the output tensor; otherwise they are standalone tensors that the
    // concat workload copies into place.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    // Allocate before copying data in; configure after allocation, then run.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
2112
// Element-wise addition of two identically shaped [2,2,2,3] Float32 tensors
// via an Addition workload. Returns the backend output alongside a
// hand-computed expected result (input1 + input2).
// Note: memoryManager is part of the common layer-test signature but is not
// used by this test.
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    // Both inputs and the output share the same shape, so no broadcasting
    // is involved here.
    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
    {
        0.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        1.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 1.0f, 2.0f,

        0.0f, 0.0f, 1.0f,
        0.2f, 1.0f, 2.0f,
    }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
    {
        1.0f, 2.0f, 1.0f,
        0.0f, 1.0f, 2.0f,

        1.0f, 2.0f, -2.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 0.0f, -3.0f,

        0.0f, 0.0f, 1.0f,
        0.7f, 1.0f, 5.0f,
    }));

    LayerTestResult<float,4> ret(outputTensorInfo);
    // Expected values are the element-wise sums of input1 and input2.
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
    {
        1.0f, 4.0f, 2.0f,
        0.2f, 2.0f, 4.0f,

        2.0f, 4.0f, -1.0f,
        0.4f, 2.0f, 4.0f,

        0.0f, 4.0f, 2.0f,
        8.4f, 1.0f, -1.0f,

        0.0f, 0.0f, 2.0f,
        0.9f, 2.0f, 7.0f,
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the workload: two inputs, one output.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    // Configuration that depends on tensor memory being allocated must run
    // before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2204
// Addition with two-sided broadcasting: a [1,3,2,1] tensor plus a [1,1,2,3]
// tensor, both broadcast to the [1,3,2,3] output shape. qScale/qOffset are
// applied (to all three tensors alike) only when T is a quantized type.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        // Identical quantization parameters on inputs and output keep the
        // quantized arithmetic trivially comparable to the float values.
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    // Each scalar of input1 is added to each row of input2.
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    // Must run after allocation and before execution.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2283
// Addition where a single-element [1,1,1,1] tensor (0.5) is broadcast across
// every element of a [1,3,2,3] tensor. qScale/qOffset are applied only when
// T is a quantized type.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        // Same quantization parameters everywhere.
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
             0.0f,  1.0f,  2.0f,
             3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    // Expected: every element of input1 shifted by the broadcast 0.5.
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
             0.5f,  1.5f,  2.5f,
             3.5f,  4.5f,  5.5f,
             6.5f,  7.5f,  8.5f,
             9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    // Must run after allocation and before execution.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2357
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002358LayerTestResult<float, 4> AdditionBroadcastTest(
2359 armnn::IWorkloadFactory& workloadFactory,
2360 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002361{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002362 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
2363 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002364}
2365
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002366LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
2367 armnn::IWorkloadFactory& workloadFactory,
2368 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002369{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002370 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
2371 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002372}
2373
Sadik Armagan2999a022019-04-09 14:20:12 +01002374LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
2375 armnn::IWorkloadFactory& workloadFactory,
2376 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2377{
2378 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
2379 workloadFactory, memoryManager, 2.f, 0);
2380}
2381
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002382LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
2383 armnn::IWorkloadFactory& workloadFactory,
2384 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002385{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002386 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
2387 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002388}
2389
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002390LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
2391 armnn::IWorkloadFactory& workloadFactory,
2392 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002393{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002394 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
2395 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00002396}
2397
Sadik Armagan2999a022019-04-09 14:20:12 +01002398LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
2399 armnn::IWorkloadFactory& workloadFactory,
2400 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2401{
2402 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
2403 workloadFactory, memoryManager, 0.1333333f, 0);
2404}
2405
// Runs the same Addition workload on the backend under test
// (workloadFactory) and on a reference backend (refWorkloadFactory) with
// identical random inputs. ret.output holds the backend result and
// ret.outputExpected the reference result, so the caller can compare them.
// Note: memoryManager is part of the common signature but unused here.
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // Fixed seeds keep the comparison deterministic across runs.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference workload reuses the same descriptor, re-pointed at the
    // reference factory's tensor handles.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    // Both workloads consume identical input data.
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    // Each workload must be configured after allocation and before execution.
    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // output <- backend under test; outputExpected <- reference backend.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
2475
namespace {
// Shared driver for all Division layer tests: builds two input tensors and
// an expected output tensor (each with its own quantization scale/offset),
// runs a Division workload, and returns the actual vs. expected result.
// Note: memoryManager is part of the common signature but unused here.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DivisionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Unlike the addition helpers, quantization parameters are set
    // unconditionally; they are ignored for non-quantized types.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // Must run after allocation and before execution.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
2540
// Checks IEEE-754 behaviour of Float32 division with zero divisors:
// non-zero / ±0 yields ±infinity (sign follows the operands' signs) and
// 0 / ±0 yields NaN. The last four elements (5/5 == 1) cover the ordinary
// case alongside the special values.
LayerTestResult<float,4> DivisionByZeroTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        1.f,  1.f,  1.f,  1.f,  0.f, 0.f, 0.f, 0.f,
       -1.f, -1.f, -1.f, -1.f,  5.f, 5.f, 5.f, 5.f });

    // Divisors mix positive and negative zero to exercise the sign of the
    // resulting infinities.
    std::vector<float> input1({
        0.f, 0.f, -0.f, -0.f,  0.f, 0.f, -0.f, -0.f,
        0.f, 0.f, -0.f, -0.f,  5.f, 5.f,  5.f,  5.f });

    std::vector<float> output({
        INFINITY, INFINITY, -INFINITY, -INFINITY,  NAN, NAN, -NAN, -NAN,
       -INFINITY, -INFINITY, INFINITY, INFINITY,  1, 1, 1, 1 });

    return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
                                                        memoryManager,
                                                        shape, input0, 1.0f, 0,
                                                        shape, input1, 1.0f, 0,
                                                        shape, output, 1.0f, 0);
}
2570
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002571LayerTestResult<float,4> DivisionTest(
2572 armnn::IWorkloadFactory& workloadFactory,
2573 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002574{
2575 const unsigned int width = 2;
2576 const unsigned int height = 2;
2577 const unsigned int channelCount = 2;
2578 const unsigned int batchSize = 2;
2579
2580 unsigned int shape[] = { batchSize, channelCount, height, width };
2581
2582 std::vector<float> input0({
2583 2, 2, 2, 2, 3, 3, 3, 3,
2584 4, 4, 4, 4, 5, 5, 5, 5 });
2585
2586 std::vector<float> input1({
2587 1, 1, 1, 1, 2, 2, 2, 2,
2588 4, 4, 4, 4, 4, 4, 4, 4 });
2589
2590 std::vector<float> output({
2591 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
2592 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
2593
David Beck5cd01f32018-09-12 16:00:08 +01002594
Sadik Armagan2999a022019-04-09 14:20:12 +01002595 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2596 memoryManager,
2597 shape, input0, 1.0f, 0,
2598 shape, input1, 1.0f, 0,
2599 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002600}
2601
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002602LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
2603 armnn::IWorkloadFactory& workloadFactory,
2604 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002605{
2606 unsigned int shape0[] = { 1, 2, 2, 2 };
2607 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2608
2609 unsigned int shape1[] = { 1, 1, 1, 1 };
2610 std::vector<float> input1({ 2 });
2611
2612 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2613
David Beck5cd01f32018-09-12 16:00:08 +01002614
Sadik Armagan2999a022019-04-09 14:20:12 +01002615 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2616 memoryManager,
2617 shape0, input0, 1.0f, 0,
2618 shape1, input1, 1.0f, 0,
2619 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002620}
2621
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002622LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
2623 armnn::IWorkloadFactory& workloadFactory,
2624 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002625{
2626 unsigned int shape0[] = { 1, 3, 3, 2 };
2627 std::vector<float> input0({
2628 1, 4, 3, 8, 5, 12,
2629 7, 16, 9, 20, 11, 24,
2630 13, 28, 15, 32, 17, 36});
2631
2632 unsigned int shape1[] = { 1, 1, 1, 2 };
2633 std::vector<float> input1({ 1, 2 });
2634
2635 std::vector<float> output({
2636 1, 2, 3, 4, 5, 6,
2637 7, 8, 9, 10, 11, 12,
2638 13, 14, 15, 16, 17, 18});
2639
Sadik Armagan2999a022019-04-09 14:20:12 +01002640 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2641 memoryManager,
2642 shape0, input0, 1.0f, 0,
2643 shape1, input1, 1.0f, 0,
2644 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002645}
2646
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002647LayerTestResult<uint8_t,4> DivisionUint8Test(
2648 armnn::IWorkloadFactory& workloadFactory,
2649 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002650{
2651 const unsigned int width = 2;
2652 const unsigned int height = 2;
2653 const unsigned int channelCount = 2;
2654 const unsigned int batchSize = 2;
2655
2656 unsigned int shape[] = { batchSize, channelCount, height, width };
2657
2658 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2659 4, 4, 4, 4, 5, 5, 5, 5 });
2660
2661 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2662 4, 4, 4, 4, 4, 4, 4, 4 });
2663
2664 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2665 4, 4, 4, 4, 5, 5, 5, 5});
2666
2667
Sadik Armagan2999a022019-04-09 14:20:12 +01002668 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2669 memoryManager,
2670 shape, input0, 1.0f, 0,
2671 shape, input1, 1.0f, 0,
2672 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002673}
2674
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002675LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
2676 armnn::IWorkloadFactory& workloadFactory,
2677 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002678{
2679 unsigned int shape0[] = { 1, 2, 2, 2 };
2680 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2681
2682 unsigned int shape1[] = { 1, 1, 1, 1 };
2683 std::vector<uint8_t> input1({ 2 });
2684
2685 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2686
Sadik Armagan2999a022019-04-09 14:20:12 +01002687 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2688 memoryManager,
2689 shape0, input0, 1.0f, 0,
2690 shape1, input1, 1.0f, 0,
2691 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002692}
2693
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002694LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
2695 armnn::IWorkloadFactory& workloadFactory,
2696 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002697{
2698 unsigned int shape0[] = { 1, 3, 3, 2 };
2699 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
2700 7, 16, 9, 20, 11, 24,
2701 13, 28, 15, 32, 17, 36});
2702
2703 unsigned int shape1[] = { 1, 1, 1, 2 };
2704 std::vector<uint8_t> input1({ 1, 2 });
2705
2706 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
2707 7, 8, 9, 10, 11, 12,
2708 13, 14, 15, 16, 17, 18});
2709
Sadik Armagan2999a022019-04-09 14:20:12 +01002710 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2711 memoryManager,
2712 shape0, input0, 1.0f, 0,
2713 shape1, input1, 1.0f, 0,
2714 shape0, output, 1.0f, 0);
2715}
2716
2717LayerTestResult<int16_t,4> DivisionInt16Test(
2718 armnn::IWorkloadFactory& workloadFactory,
2719 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2720{
2721 unsigned int shape[] = { 2, 2, 2, 2 };
2722
2723 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2724 4, 4, 4, 4, 5, 5, 5, 5 });
2725
2726 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2727 4, 4, 4, 4, 4, 4, 4, 4 });
2728
2729 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2730 4, 4, 4, 4, 5, 5, 5, 5});
2731
2732
2733 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2734 memoryManager,
2735 shape, input0, 1.0f, 0,
2736 shape, input1, 1.0f, 0,
2737 shape, output, 0.25f, 0);
2738}
2739
2740LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
2741 armnn::IWorkloadFactory& workloadFactory,
2742 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2743{
2744 unsigned int shape0[] = { 1, 2, 2, 2 };
2745 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2746
2747 unsigned int shape1[] = { 1, 1, 1, 1 };
2748 std::vector<int16_t> input1({ 2 });
2749
2750 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2751
2752 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2753 memoryManager,
2754 shape0, input0, 1.0f, 0,
2755 shape1, input1, 1.0f, 0,
2756 shape0, output, 1.0f, 0);
2757}
2758
2759LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
2760 armnn::IWorkloadFactory& workloadFactory,
2761 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2762{
2763 unsigned int shape0[] = { 1, 3, 3, 2 };
2764 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
2765 7, 16, 9, 20, 11, 24,
2766 13, 28, 15, 32, 17, 36});
2767
2768 unsigned int shape1[] = { 1, 1, 1, 2 };
2769 std::vector<int16_t> input1({ 1, 2 });
2770
2771 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
2772 7, 8, 9, 10, 11, 12,
2773 13, 14, 15, 16, 17, 18});
2774
2775 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2776 memoryManager,
2777 shape0, input0, 1.0f, 0,
2778 shape1, input1, 1.0f, 0,
2779 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002780}
2781
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002782template<typename DescriptorType>
2783std::unique_ptr<armnn::IWorkload> CreateWorkload(
2784 const armnn::IWorkloadFactory& workloadFactory,
2785 const armnn::WorkloadInfo& info,
2786 const DescriptorType& descriptor)
2787{
2788 return CreateWorkload(workloadFactory, info, descriptor);
2789};
2790
// Explicit specialization: route MaximumQueueDescriptor to the factory's
// CreateMaximum entry point.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MaximumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMaximum(descriptor, info);
}
2799
// Explicit specialization: route MinimumQueueDescriptor to the factory's
// CreateMinimum entry point.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MinimumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMinimum(descriptor, info);
}
2808
// Explicit specialization: route EqualQueueDescriptor to the factory's
// CreateEqual entry point.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::EqualQueueDescriptor& descriptor)
{
    return workloadFactory.CreateEqual(descriptor, info);
}
2817
// Explicit specialization: route GreaterQueueDescriptor to the factory's
// CreateGreater entry point.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::GreaterQueueDescriptor& descriptor)
{
    return workloadFactory.CreateGreater(descriptor, info);
}
2826
namespace {

// Generic driver for 4-D element-wise layer tests (Maximum, Minimum, Equal,
// Greater, ...). Builds two input tensors and one output tensor, creates the
// workload through the CreateWorkload<Descriptor> specialisations above, runs
// it, and returns actual vs. expected results for comparison by the caller.
//
// Template parameters:
//   Descriptor      - queue descriptor type selecting the workload to create.
//   ArmnnTypeInput  - armnn::DataType of both inputs.
//   ArmnnTypeOutput - armnn::DataType of the output (Boolean for comparison
//                     ops such as Equal/Greater).
//   TInput/TOutput  - C++ element types resolved from the armnn data types.
//
// qScale/qOffset are applied to all three tensor infos, but only when TInput
// is a quantized type; float tests keep the default quantization parameters.
template <typename Descriptor,
          armnn::DataType ArmnnTypeInput,
          armnn::DataType ArmnnTypeOutput,
          typename TInput = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<TInput> values0,
    const unsigned int shape1[4], std::vector<TInput> values1,
    const unsigned int outShape[4], std::vector<TOutput> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};

    auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);

    // Quantization parameters are only meaningful for quantized element types.
    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo0.SetQuantizationScale(qScale);
        inputTensorInfo0.SetQuantizationOffset(qOffset);

        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);

        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<TOutput,4> ret(outputTensorInfo);

    // Boolean outputs (comparison ops) are checked with a dedicated
    // comparison path in the test framework.
    if(ArmnnTypeOutput == armnn::DataType::Boolean)
    {
        ret.compareBoolean = true;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    // Dispatches to the explicit CreateWorkload specialisation for Descriptor.
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // Must run after tensor allocation and before Execute: some workloads
    // finalise internal state once buffers exist.
    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
    return ret;
}

// Convenience overload for workloads whose input and output element types are
// identical (e.g. Maximum/Minimum); forwards to the two-type helper above.
template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
LayerTestResult<T, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<T> values0,
    const unsigned int shape1[4], std::vector<T> values1,
    const unsigned int outShape[4], std::vector<T> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
        (workloadFactory,
         memoryManager,
         shape0,
         values0,
         shape1,
         values1,
         outShape,
         outValues,
         qScale,
         qOffset);
}
} // anonymous namespace
2918
2919LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2920 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002921{
2922 const unsigned int width = 2;
2923 const unsigned int height = 2;
2924 const unsigned int channelCount = 2;
2925 const unsigned int batchSize = 2;
2926
2927 unsigned int shape[] = { batchSize, channelCount, height, width };
2928
2929 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2930 3, 3, 3, 3, 4, 4, 4, 4 });
2931
2932 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2933 5, 5, 5, 5, 4, 4, 4, 4 });
2934
kevmay012b4d88e2019-01-24 14:05:09 +00002935 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
2936 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002937
kevmay012b4d88e2019-01-24 14:05:09 +00002938 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002939 workloadFactory,
2940 memoryManager,
2941 shape,
2942 input0,
2943 shape,
2944 input1,
2945 shape,
2946 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002947}
2948
kevmay012b4d88e2019-01-24 14:05:09 +00002949LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002950 armnn::IWorkloadFactory& workloadFactory,
2951 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2952{
2953 unsigned int shape0[] = { 1, 2, 2, 2 };
2954 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2955
2956 unsigned int shape1[] = { 1, 1, 1, 1 };
2957 std::vector<float> input1({ 1 });
2958
kevmay012b4d88e2019-01-24 14:05:09 +00002959 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002960
kevmay012b4d88e2019-01-24 14:05:09 +00002961 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002962 workloadFactory,
2963 memoryManager,
2964 shape0,
2965 input0,
2966 shape1,
2967 input1,
2968 shape0,
2969 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002970}
2971
kevmay012b4d88e2019-01-24 14:05:09 +00002972LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002973 armnn::IWorkloadFactory& workloadFactory,
2974 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2975{
2976 const unsigned int shape0[] = { 1, 2, 2, 3 };
2977 const unsigned int shape1[] = { 1, 1, 1, 3 };
2978
2979 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2980 7, 8, 9, 10, 11, 12 });
2981
2982 std::vector<float> input1({ 1, 2, 3});
2983
kevmay012b4d88e2019-01-24 14:05:09 +00002984 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
2985 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002986
kevmay012b4d88e2019-01-24 14:05:09 +00002987 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002988 workloadFactory,
2989 memoryManager,
2990 shape0,
2991 input0,
2992 shape1,
2993 input1,
2994 shape0,
2995 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002996}
2997
2998LayerTestResult<uint8_t, 4> EqualUint8Test(
2999 armnn::IWorkloadFactory& workloadFactory,
3000 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3001{
3002 unsigned int shape[] = { 2, 2, 2, 2 };
3003
3004 // See dequantized values to the right.
3005 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003006 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003007
3008 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3009 3, 3, 3, 3, 5, 5, 5, 5 });
3010
3011 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3012 1, 1, 1, 1, 0, 0, 0, 0 });
3013
kevmay012b4d88e2019-01-24 14:05:09 +00003014 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3015 armnn::DataType::QuantisedAsymm8,
3016 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003017 workloadFactory,
3018 memoryManager,
3019 shape,
3020 input0,
3021 shape,
3022 input1,
3023 shape,
3024 output,
3025 1.0f,
3026 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003027}
3028
3029LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
3030 armnn::IWorkloadFactory& workloadFactory,
3031 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3032{
3033 const unsigned int shape0[] = { 1, 2, 2, 3 };
3034 const unsigned int shape1[] = { 1, 1, 1, 1 };
3035
3036 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3037 7, 8, 9, 10, 11, 12 });
3038
3039 std::vector<uint8_t> input1({ 1 });
3040
3041 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
3042 0, 0, 0, 0, 0, 0 });
3043
kevmay012b4d88e2019-01-24 14:05:09 +00003044 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3045 armnn::DataType::QuantisedAsymm8,
3046 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003047 workloadFactory,
3048 memoryManager,
3049 shape0,
3050 input0,
3051 shape1,
3052 input1,
3053 shape0,
3054 output,
3055 1.0f,
3056 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003057}
3058
3059LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
3060 armnn::IWorkloadFactory& workloadFactory,
3061 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3062{
3063 const unsigned int shape0[] = { 1, 2, 2, 3 };
3064 const unsigned int shape1[] = { 1, 1, 1, 3 };
3065
3066 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3067 7, 8, 9, 10, 11, 12 });
3068
3069 std::vector<uint8_t> input1({ 1, 1, 3});
3070
3071 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
3072 0, 0, 0, 0, 0, 0 });
3073
kevmay012b4d88e2019-01-24 14:05:09 +00003074 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3075 armnn::DataType::QuantisedAsymm8,
3076 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003077 workloadFactory,
3078 memoryManager,
3079 shape0,
3080 input0,
3081 shape1,
3082 input1,
3083 shape0,
3084 output,
3085 1.0f,
3086 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003087}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003088
kevmay012b4d88e2019-01-24 14:05:09 +00003089LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00003090 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3091{
3092 const unsigned int width = 2;
3093 const unsigned int height = 2;
3094 const unsigned int channelCount = 2;
3095 const unsigned int batchSize = 2;
3096
3097 unsigned int shape[] = { batchSize, channelCount, height, width };
3098
3099 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3100 3, 3, 3, 3, 4, 4, 4, 4 });
3101
3102 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
3103 5, 5, 5, 5, 4, 4, 4, 4 });
3104
kevmay012b4d88e2019-01-24 14:05:09 +00003105 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3106 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003107
kevmay012b4d88e2019-01-24 14:05:09 +00003108 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003109 workloadFactory,
3110 memoryManager,
3111 shape,
3112 input0,
3113 shape,
3114 input1,
3115 shape,
3116 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003117}
3118
kevmay012b4d88e2019-01-24 14:05:09 +00003119LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003120 armnn::IWorkloadFactory& workloadFactory,
3121 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3122{
3123 unsigned int shape0[] = { 1, 2, 2, 2 };
3124 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3125
3126 unsigned int shape1[] = { 1, 1, 1, 1 };
3127 std::vector<float> input1({ 1 });
3128
kevmay012b4d88e2019-01-24 14:05:09 +00003129 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00003130
kevmay012b4d88e2019-01-24 14:05:09 +00003131 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003132 workloadFactory,
3133 memoryManager,
3134 shape0,
3135 input0,
3136 shape1,
3137 input1,
3138 shape0,
3139 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003140}
3141
kevmay012b4d88e2019-01-24 14:05:09 +00003142LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003143 armnn::IWorkloadFactory& workloadFactory,
3144 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3145{
3146 const unsigned int shape0[] = { 1, 2, 2, 3 };
3147 const unsigned int shape1[] = { 1, 1, 1, 3 };
3148
3149 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
3150 7, 8, 9, 10, 11, 12 });
3151
3152 std::vector<float> input1({ 1, 3, 2});
3153
kevmay012b4d88e2019-01-24 14:05:09 +00003154 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
3155 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003156
kevmay012b4d88e2019-01-24 14:05:09 +00003157 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003158 workloadFactory,
3159 memoryManager,
3160 shape0,
3161 input0,
3162 shape1,
3163 input1,
3164 shape0,
3165 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003166}
3167
3168LayerTestResult<uint8_t, 4> GreaterUint8Test(
3169 armnn::IWorkloadFactory& workloadFactory,
3170 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3171{
3172 unsigned int shape[] = { 2, 2, 2, 2 };
3173
3174 // See dequantized values to the right.
3175 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3176 3, 3, 3, 3, 5, 5, 5, 5 });
3177
3178 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3179 2, 2, 2, 2, 5, 5, 5, 5 });
3180
3181 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
3182 1, 1, 1, 1, 0, 0, 0, 0 });
3183
kevmay012b4d88e2019-01-24 14:05:09 +00003184 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3185 armnn::DataType::QuantisedAsymm8,
3186 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003187 workloadFactory,
3188 memoryManager,
3189 shape,
3190 input0,
3191 shape,
3192 input1,
3193 shape,
3194 output,
3195 1.0f,
3196 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003197}
3198
3199LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
3200 armnn::IWorkloadFactory& workloadFactory,
3201 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3202{
3203 const unsigned int shape0[] = { 1, 2, 2, 3 };
3204 const unsigned int shape1[] = { 1, 1, 1, 1 };
3205
3206 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3207 7, 8, 9, 10, 11, 12 });
3208
3209 std::vector<uint8_t> input1({ 1 });
3210
3211 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
3212 1, 1, 1, 1, 1, 1 });
3213
kevmay012b4d88e2019-01-24 14:05:09 +00003214 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3215 armnn::DataType::QuantisedAsymm8,
3216 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003217 workloadFactory,
3218 memoryManager,
3219 shape0,
3220 input0,
3221 shape1,
3222 input1,
3223 shape0,
3224 output,
3225 1.0f,
3226 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003227}
3228
3229LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
3230 armnn::IWorkloadFactory& workloadFactory,
3231 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3232{
3233 const unsigned int shape0[] = { 1, 2, 2, 3 };
3234 const unsigned int shape1[] = { 1, 1, 1, 3 };
3235
3236 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3237 7, 8, 9, 10, 11, 12 });
3238
3239 std::vector<uint8_t> input1({ 1, 1, 3});
3240
3241 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
3242 1, 1, 1, 1, 1, 1 });
3243
kevmay012b4d88e2019-01-24 14:05:09 +00003244 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3245 armnn::DataType::QuantisedAsymm8,
3246 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003247 workloadFactory,
3248 memoryManager,
3249 shape0,
3250 input0,
3251 shape1,
3252 input1,
3253 shape0,
3254 output,
3255 1.0f,
3256 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003257}
3258
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003259LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3260 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3261{
3262 const unsigned int width = 2;
3263 const unsigned int height = 2;
3264 const unsigned int channelCount = 2;
3265 const unsigned int batchSize = 2;
3266
3267 unsigned int shape[] = { batchSize, channelCount, height, width };
3268
3269 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3270 3, 3, 3, 3, 4, 4, 4, 4 });
3271
3272 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3273 4, 4, 4, 4, 5, 5, 5, 5 });
3274
3275 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
3276 4, 4, 4, 4, 5, 5, 5, 5 });
3277
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003278 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3279 workloadFactory,
3280 memoryManager,
3281 shape,
3282 input0,
3283 shape,
3284 input1,
3285 shape,
3286 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003287}
3288
3289LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
3290 armnn::IWorkloadFactory& workloadFactory,
3291 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3292{
3293 unsigned int shape0[] = { 1, 2, 2, 2 };
3294 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3295
3296 unsigned int shape1[] = { 1, 1, 1, 1 };
3297 std::vector<float> input1({ 2 });
3298
3299 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
3300
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003301 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3302 workloadFactory,
3303 memoryManager,
3304 shape0,
3305 input0,
3306 shape1,
3307 input1,
3308 shape0,
3309 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003310}
3311
3312LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
3313 armnn::IWorkloadFactory& workloadFactory,
3314 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3315{
3316 const unsigned int shape0[] = { 1, 2, 2, 3 };
3317 const unsigned int shape1[] = { 1, 1, 1, 3 };
3318
3319 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3320 7, 8, 9, 10, 11, 12 });
3321
3322 std::vector<float> input1({ 1, 2, 3});
3323
3324 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003325 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003326
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003327 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3328 workloadFactory,
3329 memoryManager,
3330 shape0,
3331 input0,
3332 shape1,
3333 input1,
3334 shape0,
3335 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003336}
3337
3338LayerTestResult<uint8_t, 4> MaximumUint8Test(
3339 armnn::IWorkloadFactory& workloadFactory,
3340 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3341{
3342 unsigned int shape[] = { 2, 2, 2, 2 };
3343
3344 // See dequantized values to the right.
3345 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3346 3, 3, 3, 3, 4, 4, 4, 4 });
3347
3348 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3349 4, 4, 4, 4, 5, 5, 5, 5 });
3350
3351 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3352 4, 4, 4, 4, 5, 5, 5, 5 });
3353
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003354 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3355 workloadFactory,
3356 memoryManager,
3357 shape,
3358 input0,
3359 shape,
3360 input1,
3361 shape,
3362 output,
3363 1.0f,
3364 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003365}
3366
3367LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
3368 armnn::IWorkloadFactory& workloadFactory,
3369 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3370{
3371 const unsigned int shape0[] = { 1, 2, 2, 3 };
3372 const unsigned int shape1[] = { 1, 1, 1, 1 };
3373
3374 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3375 7, 8, 9, 10, 11, 12 });
3376
3377 std::vector<uint8_t> input1({2});
3378
3379 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
3380 7, 8, 9, 10, 11, 12 });
3381
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003382 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3383 workloadFactory,
3384 memoryManager,
3385 shape0,
3386 input0,
3387 shape1,
3388 input1,
3389 shape0,
3390 output,
3391 1.0f,
3392 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003393}
3394
3395LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
3396 armnn::IWorkloadFactory& workloadFactory,
3397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3398{
3399 const unsigned int shape0[] = { 1, 2, 2, 3 };
3400 const unsigned int shape1[] = { 1, 1, 1, 3 };
3401
3402 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3403 7, 8, 9, 10, 11, 12 });
3404
3405 std::vector<uint8_t> input1({ 1, 10, 3});
3406
3407 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
3408 7, 10, 9, 10, 11, 12 });
3409
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003410 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3411 workloadFactory,
3412 memoryManager,
3413 shape0,
3414 input0,
3415 shape1,
3416 input1,
3417 shape0,
3418 output,
3419 1.0f,
3420 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003421}
3422
Sadik Armagan2999a022019-04-09 14:20:12 +01003423LayerTestResult<int16_t, 4> MaximumInt16Test(
3424 armnn::IWorkloadFactory& workloadFactory,
3425 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3426{
3427 unsigned int shape[] = { 2, 2, 2, 2 };
3428
3429 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3430 3, 3, 3, 3, 4, 4, 4, 4 });
3431
3432 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3433 4, 4, 4, 4, 5, 5, 5, 5 });
3434
3435 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3436 4, 4, 4, 4, 5, 5, 5, 5 });
3437
3438 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3439 workloadFactory,
3440 memoryManager,
3441 shape,
3442 input0,
3443 shape,
3444 input1,
3445 shape,
3446 output,
3447 1.0f,
3448 0);
3449}
3450
3451LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
3452 armnn::IWorkloadFactory& workloadFactory,
3453 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3454{
3455 const unsigned int shape0[] = { 1, 2, 2, 3 };
3456 const unsigned int shape1[] = { 1, 1, 1, 1 };
3457
3458 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3459 7, 8, 9, 10, 11, 12 });
3460
3461 std::vector<int16_t> input1({2});
3462
3463 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
3464 7, 8, 9, 10, 11, 12 });
3465
3466 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3467 workloadFactory,
3468 memoryManager,
3469 shape0,
3470 input0,
3471 shape1,
3472 input1,
3473 shape0,
3474 output,
3475 1.0f,
3476 0);
3477}
3478
3479LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
3480 armnn::IWorkloadFactory& workloadFactory,
3481 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3482{
3483 const unsigned int shape0[] = { 1, 2, 2, 3 };
3484 const unsigned int shape1[] = { 1, 1, 1, 3 };
3485
3486 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3487 7, 8, 9, 10, 11, 12 });
3488
3489 std::vector<int16_t> input1({ 1, 10, 3});
3490
3491 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
3492 7, 10, 9, 10, 11, 12 });
3493
3494 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3495 workloadFactory,
3496 memoryManager,
3497 shape0,
3498 input0,
3499 shape1,
3500 input1,
3501 shape0,
3502 output,
3503 1.0f,
3504 0);
3505}
3506
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003507LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
3508 armnn::IWorkloadFactory& workloadFactory,
3509 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3510{
3511 unsigned int shape0[] = { 1, 2, 2, 2 };
3512 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3513
3514 unsigned int shape1[] = { 1, 1, 1, 1 };
3515 std::vector<float> input1({ 2 });
3516
3517 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
3518
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003519 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3520 workloadFactory,
3521 memoryManager,
3522 shape0,
3523 input0,
3524 shape1,
3525 input1,
3526 shape0,
3527 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003528}
3529
3530
3531LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
3532 armnn::IWorkloadFactory& workloadFactory,
3533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3534{
3535 unsigned int shape0[] = { 1, 2, 2, 2 };
3536 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
3537
3538 unsigned int shape1[] = { 1, 1, 1, 1 };
3539 std::vector<float> input1({ 5 });
3540
3541 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
3542
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003543 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3544 workloadFactory,
3545 memoryManager,
3546 shape0,
3547 input0,
3548 shape1,
3549 input1,
3550 shape0,
3551 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003552}
3553
3554LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
3555 armnn::IWorkloadFactory & workloadFactory,
3556 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
3557{
3558 const unsigned int shape0[] = { 1, 2, 2, 3 };
3559 const unsigned int shape1[] = { 1, 1, 1, 3 };
3560
3561 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
3562 7, 1, 2, 3, 4, 5 });
3563
3564 std::vector<uint8_t> input1({ 1, 2, 3});
3565
3566 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
3567 1, 1, 2, 1, 2, 3 });
3568
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003569 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3570 workloadFactory,
3571 memoryManager,
3572 shape0,
3573 input0,
3574 shape1,
3575 input1,
3576 shape0,
3577 output,
3578 1.0f,
3579 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003580}
3581
Sadik Armagan2999a022019-04-09 14:20:12 +01003582LayerTestResult<int16_t, 4> MinimumInt16Test(
3583 armnn::IWorkloadFactory& workloadFactory,
3584 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3585{
3586 unsigned int shape[] = { 2, 2, 2, 2 };
3587
3588 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3589 3, 3, 3, 3, 4, 4, 4, 4 });
3590
3591 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3592 4, 4, 4, 4, 5, 5, 5, 5 });
3593
3594 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
3595 3, 3, 3, 3, 4, 4, 4, 4 });
3596
3597 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3598 workloadFactory,
3599 memoryManager,
3600 shape,
3601 input0,
3602 shape,
3603 input1,
3604 shape,
3605 output,
3606 1.0f,
3607 0);
3608}
3609
3610LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
3611 armnn::IWorkloadFactory& workloadFactory,
3612 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3613{
3614 const unsigned int shape0[] = { 1, 2, 2, 3 };
3615 const unsigned int shape1[] = { 1, 1, 1, 1 };
3616
3617 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3618 7, 8, 9, 10, 11, 12 });
3619
3620 std::vector<int16_t> input1({2});
3621
3622 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
3623 2, 2, 2, 2, 2, 2 });
3624
3625 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3626 workloadFactory,
3627 memoryManager,
3628 shape0,
3629 input0,
3630 shape1,
3631 input1,
3632 shape0,
3633 output,
3634 1.0f,
3635 0);
3636}
3637
3638LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
3639 armnn::IWorkloadFactory& workloadFactory,
3640 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3641{
3642 const unsigned int shape0[] = { 1, 2, 2, 3 };
3643 const unsigned int shape1[] = { 1, 1, 1, 3 };
3644
3645 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3646 7, 8, 9, 10, 11, 12 });
3647
3648 std::vector<int16_t> input1({ 1, 10, 3});
3649
3650 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
3651 1, 8, 3, 1, 10, 3 });
3652
3653 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3654 workloadFactory,
3655 memoryManager,
3656 shape0,
3657 input0,
3658 shape1,
3659 input1,
3660 shape0,
3661 output,
3662 1.0f,
3663 0);
3664}
3665
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003666namespace {
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003667LayerTestResult<float,4> MultiplicationTestHelper(
3668 armnn::IWorkloadFactory& workloadFactory,
3669 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3670 const unsigned int shape0[4],
3671 const std::vector<float> & values0,
3672 const unsigned int shape1[4],
3673 const std::vector<float> & values1,
3674 const unsigned int outShape[4],
3675 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00003676{
surmeh01bceff2f2018-03-29 16:29:27 +01003677 const size_t dimensionCount = 4;
3678 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
3679 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
3680 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00003681
surmeh01bceff2f2018-03-29 16:29:27 +01003682 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
3683 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00003684
3685 LayerTestResult<float,4> ret(outputTensorInfo);
3686
3687 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
3688 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
3689 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3690
3691 armnn::MultiplicationQueueDescriptor data;
3692 armnn::WorkloadInfo info;
3693 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
3694 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
3695 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3696
3697 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
3698
3699 inputHandle0->Allocate();
3700 inputHandle1->Allocate();
3701 outputHandle->Allocate();
3702
3703 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
3704 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3705
Derek Lambertif30f7d32019-04-09 10:25:02 +01003706 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003707 workload->Execute();
3708
3709 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
3710
surmeh01bceff2f2018-03-29 16:29:27 +01003711 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00003712 return ret;
3713}
surmeh01bceff2f2018-03-29 16:29:27 +01003714} // anonymous namespace
3715
3716
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003717LayerTestResult<float,4> MultiplicationTest(
3718 armnn::IWorkloadFactory& workloadFactory,
3719 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003720{
3721 const unsigned int width = 2;
3722 const unsigned int height = 2;
3723 const unsigned int channelCount = 2;
3724 const unsigned int batchSize = 2;
3725
3726 unsigned int shape[] = { batchSize, channelCount, height, width };
3727
3728 std::vector<float> input0({
3729 1, 1, 1, 1, 2, 2, 2, 2,
3730 3, 3, 3, 3, 4, 4, 4, 4 });
3731
3732 std::vector<float> input1({
3733 2, 2, 2, 2, 3, 3, 3, 3,
3734 4, 4, 4, 4, 5, 5, 5, 5 });
3735
3736 std::vector<float> output({
3737 2, 2, 2, 2, 6, 6, 6, 6,
3738 12, 12, 12, 12, 20, 20, 20, 20 });
3739
3740 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003741 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003742 shape,
3743 input0,
3744 shape,
3745 input1,
3746 shape,
3747 output);
3748}
3749
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003750LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
3751 armnn::IWorkloadFactory& workloadFactory,
3752 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003753{
3754 unsigned int shape0[] = { 1, 2, 2, 2 };
3755 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3756
3757 unsigned int shape1[] = { 1, 1, 1, 1 };
3758 std::vector<float> input1({ 2 });
3759
3760 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
3761
3762 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003763 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003764 shape0,
3765 input0,
3766 shape1,
3767 input1,
3768 shape0,
3769 output);
3770}
3771
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003772LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
3773 armnn::IWorkloadFactory& workloadFactory,
3774 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003775{
3776 unsigned int shape0[] = { 1, 3, 3, 2 };
3777 std::vector<float> input0({
3778 1, 2, 3, 4, 5, 6,
3779 7, 8, 9, 10, 11, 12,
3780 13, 14, 15, 16, 17, 18});
3781
3782 unsigned int shape1[] = { 1, 1, 1, 2 };
3783 std::vector<float> input1({ 1, 2 });
3784
3785 std::vector<float> output({
3786 1, 4, 3, 8, 5, 12,
3787 7, 16, 9, 20, 11, 24,
3788 13, 28, 15, 32, 17, 36});
3789
3790 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003791 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003792 shape0,
3793 input0,
3794 shape1,
3795 input1,
3796 shape0,
3797 output);
3798}
telsoa014fcda012018-03-09 14:13:49 +00003799
// Runs elementwise multiplication with random [5,2,32,16] inputs on both
// 'workloadFactory' (backend under test) and 'refWorkloadFactory', and
// returns both results so the caller can compare the two backends.
// NOTE(review): 'memoryManager' is kept for signature uniformity with the
// other test helpers; it is not referenced in this function.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the comparison deterministic across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference workload shares the descriptor but binds its own handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends consume identical input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    // 'output' holds the backend-under-test result; 'outputExpected' the reference result.
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
3869
// Runs BatchNormalization with random input and random per-channel
// mean/variance/beta/gamma on both 'workloadFactory' (backend under test)
// and 'refWorkloadFactory', and returns both results for comparison.
// NOTE(review): 'memoryManager' is kept for signature uniformity with the
// other test helpers; it is not referenced in this function.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo; // shared 1D info for mean/variance/beta/gamma

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds keep the comparison deterministic across runs.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f); // presumably 0.0f bounds variance below - TODO confirm
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference workload shares descriptor/parameters but its own handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both backends consume identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // 'output' holds the backend-under-test result; 'outputExpected' the reference result.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
3952
// Runs a Permute workload that rearranges 'inputData' according to 'mappings'.
// On return:
//   - 'outputData' holds the permuted elements (resized as needed),
//   - 'inputTensorInfo' is overwritten with the permuted tensor's info so the
//     caller continues to work with the new layout.
// NOTE(review): 'memoryManager' is kept for signature uniformity with the
// other helpers; it is not referenced in this function.
template<typename T>
void PermuteTensorData(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::PermutationVector& mappings,
        armnn::TensorInfo & inputTensorInfo,
        const T * inputData,
        std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Hand the permuted layout back to the caller.
    inputTensorInfo = outputTensorInfo;
}
3996
Jim Flynn825af452019-05-20 12:49:28 +01003997armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01003998 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3999 unsigned int concatDim)
4000{
telsoa014fcda012018-03-09 14:13:49 +00004001 std::vector<armnn::TensorShape> shapes;
4002 shapes.reserve(inputTensorInfos.size());
4003 for (const armnn::TensorInfo& it: inputTensorInfos)
4004 {
4005 shapes.push_back(it.GetShape());
4006 }
surmeh013537c2c2018-05-18 16:31:43 +01004007
Jim Flynn825af452019-05-20 12:49:28 +01004008 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
4009 shapes.end(),
4010 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01004011}
4012
//
// Concatenation is only supported for N and C dimensions for NCHW, and for the innermost dimension.
// For tensors with fewer than 4 dimensions, we need to make sure that the concat dimension is at
// least the third-slowest-iterating dimension or the innermost dimension.
//
4018
// Returns true when the inputs must be permuted before concatenation can run
// along 'concatDim' (see the note above this function). All inputs are
// expected to have the same rank.
bool NeedPermuteForConcat(
        const std::vector<armnn::TensorInfo> & inputTensorInfos,
        unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    // Permute whenever the rank is below 3, or for a rank-3 tensor when the
    // concat axis is neither the slowest-iterating (concatDim == 0) nor the
    // innermost (concatDim == 2) dimension, i.e. concatDim == 1.
    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
4044
4045armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
4046{
4047 unsigned int numDims = inputShape.GetNumDimensions();
4048 if (numDims >= 3)
4049 {
4050 // Nothing to do if the inputShape has at least 3 dimensions.
4051 return inputShape;
4052 }
4053
4054 std::vector<unsigned int> newDims(size_t(3), 1u);
4055 unsigned int expandedBy = 3 - numDims;
4056 for (unsigned int i=0; i<numDims; ++i)
4057 {
4058 newDims[expandedBy+i] = inputShape[i];
4059 }
4060 return armnn::TensorShape(3u, &newDims[0]);
4061}
4062
4063void Generate3dPermuteVectorForConcat(
4064 unsigned int numDimensions,
4065 unsigned int & concatDim,
4066 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
4067{
4068 BOOST_ASSERT_MSG(numDimensions <= 3,
4069 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01004070 unsigned int expandedBy = 3 - numDimensions;
4071 unsigned int expandedConcatAxis = concatDim + expandedBy;
4072
4073 if (expandedConcatAxis == 2)
4074 {
4075 concatDim = 0;
4076 armnn::PermutationVector forwardPermutation({1, 2, 0});
4077 armnn::PermutationVector reversePermutation({2, 0, 1});
4078 permutations = std::make_pair(forwardPermutation, reversePermutation);
4079 }
4080 else if (expandedConcatAxis == 1)
4081 {
4082 concatDim = 0;
4083 armnn::PermutationVector forwardPermutation({2, 0, 1});
4084 armnn::PermutationVector reversePermutation({1, 2, 0});
4085 permutations = std::make_pair(forwardPermutation, reversePermutation);
4086 }
4087 else
4088 {
4089 BOOST_ASSERT(expandedConcatAxis == 0);
4090 concatDim = 0;
4091 }
4092}
4093
//
// Permute the input tensors so we can do a supported concatenation.
// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
// at the front. Finally this function tells what the output shape
// of the permuted concatenated tensor is going to be.
//
// On return:
//   - inputTensorInfos/inputData describe the permuted (3d-expanded) inputs,
//     with inputData pointing into inputDataStorage (which owns the memory),
//   - permuteVector holds the reverse permutation to undo afterwards,
//   - concatDim is updated to the post-permute concat axis,
//   - outputTensorInfo's shape is updated to the permuted output shape.
//
template <typename T>
void PermuteInputsForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        std::vector<armnn::TensorInfo> & inputTensorInfos,
        std::vector<T *> & inputData,
        std::vector<std::vector<T>> & inputDataStorage,
        armnn::PermutationVector & permuteVector,
        unsigned int & concatDim,
        armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input decides the permutation for all of them.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Expand to rank 3, then permute the data into the supported layout.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    // The output shape undergoes the same expansion and forward permutation.
    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
4162
4163
4164//
4165// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01004166// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01004167// output.
4168//
4169template <typename T>
4170void PermuteOutputForConcat(
4171 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004172 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004173 const armnn::TensorInfo & tensorInfo,
4174 const armnn::PermutationVector & permuteVector,
4175 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
4176 T * data)
4177{
4178 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
4179 if (data == nullptr)
4180 {
4181 // Nullptr is an error in the test. By returning without doing the permutation
4182 // I expect the caller to fail the test. It still makes sense to report this as
4183 // an assert for Debug builds.
4184 return;
4185 }
4186
4187 armnn::TensorInfo resultTensorInfo = tensorInfo;
4188 std::vector<T> inputData(tensorInfo.GetNumElements());
4189 std::vector<T> outputData;
4190
4191 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
4192
4193 PermuteTensorData<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004194 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004195 permuteVector,
4196 resultTensorInfo,
4197 &inputData[0],
4198 outputData);
4199
4200 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
4201}
4202
4203template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004204void Concatenate(
4205 armnn::IWorkloadFactory& workloadFactory,
4206 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4207 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
4208 std::initializer_list<T *> inputsOrig,
4209 const armnn::TensorInfo& outputTensorInfoOrig,
4210 T * output,
narpra015cdda352018-11-19 15:30:27 +00004211 unsigned int concatDim,
4212 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01004213{
4214 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
4215 if (output == nullptr)
4216 {
4217 // Nullptr is an error in the test. By returning without doing the permutation
4218 // I expect the caller to fail the test. It still makes sense to report this as
4219 // an assert for Debug builds.
4220 return;
4221 }
4222
telsoa01c577f2c2018-08-31 09:22:23 +01004223 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01004224 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
4225 std::vector<T *> inputs = inputsOrig;
4226 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
4227
4228 armnn::PermutationVector permuteVector{0, 1, 2};
4229
telsoa01c577f2c2018-08-31 09:22:23 +01004230 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01004231 std::vector<std::vector<T>> tmpInputDataStorage;
4232
4233 const size_t inputCount = inputTensorInfos.size();
4234
4235 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
4236
4237 if (needPermuteForConcat)
4238 {
4239 //
4240 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01004241 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01004242 //
4243 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004244 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004245 inputTensorInfos,
4246 inputs,
4247 tmpInputDataStorage,
4248 permuteVector,
4249 concatDim,
4250 outputTensorInfo);
4251 }
4252
narpra015cdda352018-11-19 15:30:27 +00004253 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00004254
4255 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
4256 inputHandles.reserve(inputCount);
4257
narpra015cdda352018-11-19 15:30:27 +00004258 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4259
Jim Flynne242f2d2019-05-22 14:24:13 +01004260 armnn::ConcatQueueDescriptor queueDescriptor;
Jim Flynn825af452019-05-20 12:49:28 +01004261 armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
narpra015cdda352018-11-19 15:30:27 +00004262 queueDescriptor.m_Parameters = viewsDescriptor;
4263
4264 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004265 {
narpra015cdda352018-11-19 15:30:27 +00004266 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
4267 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
4268 {
4269 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
4270 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
4271 }
telsoa014fcda012018-03-09 14:13:49 +00004272
narpra015cdda352018-11-19 15:30:27 +00004273 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00004274
narpra015cdda352018-11-19 15:30:27 +00004275 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
4276 for (unsigned int i = 0; i < inputCount; ++i)
4277 {
4278 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
4279 std::unique_ptr<armnn::ITensorHandle> inputHandle =
4280 subTensorsSupported ?
4281 workloadFactory.CreateSubTensorHandle(*outputHandle,
4282 inputTensorInfo.GetShape(),
4283 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
4284 workloadFactory.CreateTensorHandle(inputTensorInfo);
4285
4286 inputHandles.emplace_back(std::move(inputHandle));
4287 }
4288
telsoa014fcda012018-03-09 14:13:49 +00004289 }
narpra015cdda352018-11-19 15:30:27 +00004290 else
4291 {
4292 for (unsigned int i = 0; i < inputCount; ++i)
4293 {
4294 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
4295 inputHandles.emplace_back(std::move(inputHandle));
4296 }
4297 }
telsoa014fcda012018-03-09 14:13:49 +00004298
4299 for (unsigned int i = 0; i < inputCount; ++i)
4300 {
surmeh013537c2c2018-05-18 16:31:43 +01004301 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00004302 }
4303
4304 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4305
Jim Flynn4ed6c832019-05-20 11:02:46 +01004306 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
telsoa014fcda012018-03-09 14:13:49 +00004307
4308 for (auto& inputHandle : inputHandles)
4309 {
4310 inputHandle->Allocate();
4311 }
4312
4313 outputHandle->Allocate();
4314
4315 unsigned int nextInputId = 0;
4316 for (auto& inputHandle : inputHandles)
4317 {
surmeh013537c2c2018-05-18 16:31:43 +01004318 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
4319 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00004320 }
4321
Derek Lambertif30f7d32019-04-09 10:25:02 +01004322 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00004323 workload->Execute();
4324
surmeh013537c2c2018-05-18 16:31:43 +01004325 if (needPermuteForConcat)
4326 {
4327 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004328 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004329 outputTensorInfo,
4330 permuteVector,
4331 std::move(outputHandle),
4332 output);
4333 }
4334 else
4335 {
4336 CopyDataFromITensorHandle(output, outputHandle.get());
4337 }
telsoa014fcda012018-03-09 14:13:49 +00004338}
4339
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004340template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004341LayerTestResult<T, 1> Concatenation1dTestImpl(
4342 armnn::IWorkloadFactory& workloadFactory,
4343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4344 float qScale,
4345 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004346{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004347 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004348
4349 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
4350 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
4351 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
4352
Jim Flynncbb66aa2019-05-15 13:03:54 +01004353 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004354
4355 LayerTestResult<T, 1> result(outputTensorInfo);
4356
4357 std::vector<T> output;
4358 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004359 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004360 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4361 { input0.data(), input1.data(), input2.data() },
4362 outputTensorInfo,
4363 output.data(),
4364 0,
4365 true);
telsoa014fcda012018-03-09 14:13:49 +00004366
4367 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
4368 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4369 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
4370 }));
4371
4372 return result;
4373}
4374
// Float32 instantiation of the 1D concatenation test (scale 0.0, offset 0 -
// quantization parameters are unused for Float32).
LayerTestResult<float, 1> Concatenation1dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
4381
// Shared implementation for the 2D concatenation tests: concatenates three
// fixed [2,3] tensors along 'dimension' and returns the actual output.
// The expected values are filled in by the specific Dim0/Dim1 callers, which
// also supply the matching 'outputTensorInfo'.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}
4432
// Concatenates three [2,3] tensors along dimension 0, giving a [6,3] result:
// the inputs' batches stacked one after another.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);

    // Expected: input0's two batches, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
4467
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004468LayerTestResult<float, 2> Concatenation2dDim0Test(
4469 armnn::IWorkloadFactory& workloadFactory,
4470 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004471{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004472 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004473}
4474
// Concatenates three 2x3 tensors along dimension 1 and checks the 2x9 result:
// each output row is the corresponding rows of the three inputs side by side.
// The input data itself is created by Concatenation2dTestImpl.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    // 3 inputs of shape { 2, 3 } concatenated on dim 1 -> { 2, 9 }.
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
    }));

    return result;
}
4497
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004498LayerTestResult<float, 2> Concatenation2dDim1Test(
4499 armnn::IWorkloadFactory& workloadFactory,
4500 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004501{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004502 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004503}
4504
// Concatenates three 2D tensors with different batch counts ({2,3}, {3,3} and
// {1,3}) along dimension 0 and checks the stacked { 6, 3 } result.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0 (this tensor has a single batch)
        16.0f, 17.0f, 18.0f,
    }));

    // All batches stacked in input order: 2 + 3 + 1 -> 6.
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
4575
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004576LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
4577 armnn::IWorkloadFactory& workloadFactory,
4578 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004579{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004580 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4581 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004582}
4583
// Concatenates three 2D tensors with different widths ({2,3}, {2,5} and {2,1})
// along dimension 1 and checks the { 2, 9 } result: each output row is the
// corresponding rows of the three inputs laid end to end.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    // Widths concatenated: 3 + 5 + 1 -> 9.
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}
4642
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004643LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
4644 armnn::IWorkloadFactory& workloadFactory,
4645 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004646{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004647 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4648 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004649}
4650
// Shared driver for the 3D concatenation tests: builds three fixed { 2, 3, 2 }
// inputs, concatenates them along 'dimension' into the caller-supplied output
// shape, and returns the actual output (the caller fills in outputExpected).
// 'useSubtensor' selects the sub-tensor code path in the workload factory.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
4738
// Concatenates three 2x3x2 tensors along dimension 0 (batch) and checks the
// { 6, 3, 2 } result: the inputs' batches are stacked in input order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
4809
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004810LayerTestResult<float, 3> Concatenation3dDim0Test(
4811 armnn::IWorkloadFactory& workloadFactory,
4812 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004813{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004814 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004815}
4816
// Concatenates three 2x3x2 tensors along dimension 1 (channel) and checks the
// { 2, 9, 2 } result: per batch, the inputs' channels follow each other.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
4887
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004888LayerTestResult<float, 3> Concatenation3dDim1Test(
4889 armnn::IWorkloadFactory& workloadFactory,
4890 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004891{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004892 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004893}
4894
// Concatenates three 2x3x2 tensors along dimension 2 (innermost) and checks
// the { 2, 3, 6 } result: the inputs' innermost pairs are interleaved per
// (batch, channel). 'useSubtensor' selects the sub-tensor workload path.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
4930
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004931LayerTestResult<float, 3> Concatenation3dDim2Test(
4932 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004933 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4934 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004935{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004936 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
4937 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004938}
4939
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004940template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004941LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
4942 armnn::IWorkloadFactory& workloadFactory,
4943 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4944 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004945 int32_t qOffset)
4946{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004947 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004948 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4949 // Batch 0, Channel 0
4950 1.0f, 2.0f,
4951
4952 // Batch 0, Channel 1
4953 3.0f, 4.0f,
4954
4955 // Batch 0, Channel 2
4956 5.0f, 6.0f,
4957
4958 // Batch 1, Channel 0
4959 19.0f, 20.0f,
4960
4961 // Batch 1, Channel 1
4962 21.0f, 22.0f,
4963
4964 // Batch 1, Channel 2
4965 23.0f, 24.0f
4966 }));
4967
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004968 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004969 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4970 // Batch 0, Channel 0
4971 7.0f, 8.0f,
4972
4973 // Batch 0, Channel 1
4974 9.0f, 10.0f,
4975
4976 // Batch 0, Channel 2
4977 11.0f, 12.0f,
4978 }));
4979
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004980 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004981 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4982 // Batch 0, Channel 0
4983 25.0f, 26.0f,
4984
4985 // Batch 0, Channel 1
4986 27.0f, 28.0f,
4987
4988 // Batch 0, Channel 2
4989 29.0f, 30.0f,
4990
4991 // Batch 1, Channel 0
4992 13.0f, 14.0f,
4993
4994 // Batch 1, Channel 1
4995 15.0f, 16.0f,
4996
4997 // Batch 1, Channel 2
4998 17.0f, 18.0f,
4999
5000 // Batch 2, Channel 0
5001 31.0f, 32.0f,
5002
5003 // Batch 2, Channel 1
5004 33.0f, 34.0f,
5005
5006 // Batch 2, Channel 2
5007 35.0f, 36.0f
5008 }));
5009
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005010 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005011 LayerTestResult<T, 3> result(outputTensorInfo);
5012
5013 std::vector<T> output;
5014 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005015 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005016 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5017 { input0.data(), input1.data(), input2.data() },
5018 outputTensorInfo,
5019 output.data(),
5020 0,
5021 true);
telsoa014fcda012018-03-09 14:13:49 +00005022
5023 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5024 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5025 // Batch 0, Channel 0
5026 1.0f, 2.0f,
5027
5028 // Batch 0, Channel 1
5029 3.0f, 4.0f,
5030
5031 // Batch 0, Channel 2
5032 5.0f, 6.0f,
5033
5034 // Batch 1, Channel 0
5035 19.0f, 20.0f,
5036
5037 // Batch 1, Channel 1
5038 21.0f, 22.0f,
5039
5040 // Batch 1, Channel 2
5041 23.0f, 24.0f,
5042
5043 // Batch 2, Channel 0
5044 7.0f, 8.0f,
5045
5046 // Batch 2, Channel 1
5047 9.0f, 10.0f,
5048
5049 // Batch 2, Channel 2
5050 11.0f, 12.0f,
5051
5052 // Batch 3, Channel 0
5053 25.0f, 26.0f,
5054
5055 // Batch 3, Channel 1
5056 27.0f, 28.0f,
5057
5058 // Batch 3, Channel 2
5059 29.0f, 30.0f,
5060
5061 // Batch 4, Channel 0
5062 13.0f, 14.0f,
5063
5064 // Batch 4, Channel 1
5065 15.0f, 16.0f,
5066
5067 // Batch 4, Channel 2
5068 17.0f, 18.0f,
5069
5070 // Batch 5, Channel 0
5071 31.0f, 32.0f,
5072
5073 // Batch 5, Channel 1
5074 33.0f, 34.0f,
5075
5076 // Batch 5, Channel 2
5077 35.0f, 36.0f
5078 }));
5079
5080 return result;
5081}
5082
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005083LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
5084 armnn::IWorkloadFactory& workloadFactory,
5085 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005086{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005087 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5088 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005089}
5090
// Concatenates three 3D tensors with different channel counts ({2,3,2},
// {2,4,2} and {2,1,2}) along dimension 1 and checks the { 2, 8, 2 } result:
// per batch, the inputs' channels follow each other in input order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    // Channels concatenated: 3 + 4 + 1 -> 8.
    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
5221
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005222LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
5223 armnn::IWorkloadFactory& workloadFactory,
5224 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005225{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005226 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5227 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005228}
5229
// Concatenates three 3D tensors with different innermost extents ({2,3,2},
// {2,3,1} and {2,3,3}) along dimension 2 and checks the { 2, 3, 6 } result:
// per (batch, channel), the inputs' innermost values are laid end to end.
// 'useSubtensor' selects the sub-tensor workload path.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    // Innermost extents concatenated: 2 + 1 + 3 -> 6.
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
5337
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005338LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
5339 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005340 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5341 bool useSubtensor)
5342{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005343 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
5344 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005345}
5346
// Shared driver for the 4D concatenation tests: concatenates three identical
// { 1, 3, 2, 2 } inputs along 'dimension' into 'outputTensorInfo'.
// Only result.output is populated; each caller fills in its own
// result.outputExpected for the dimension it exercises.
// 'useSubtensor' is forwarded unchanged to Concatenate().
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    // Three inputs sharing the same shape/quantization; values 1-12, 11-22, 21-32.
    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f
    }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}
5403
// Concatenation of the three shared { 1, 3, 2, 2 } inputs along dimension 0
// (batch), producing a { 3, 3, 2, 2 } output: the inputs appear back-to-back,
// each unchanged.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    // Expected output: input0, input1, input2 stacked along the batch axis.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
5440
5441LayerTestResult<float, 4> Concatenation4dDim0Test(
5442 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005443 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005444{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005445 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005446}
5447
// Concatenation of the three shared { 1, 3, 2, 2 } inputs along dimension 1
// (channels), producing a { 1, 9, 2, 2 } output. With a batch of one this
// lays the inputs out back-to-back, each unchanged.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    // Expected output: the 3 channels of each input, in input order.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5485
5486LayerTestResult<float, 4> Concatenation4dDim1Test(
5487 armnn::IWorkloadFactory& workloadFactory,
5488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5489{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005490 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005491}
5492
// Concatenation of the three shared { 1, 3, 2, 2 } inputs along dimension 2
// (height), producing a { 1, 3, 6, 2 } output: for each channel, the two rows
// of input0 are followed by the two rows of input1 and then input2.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);

    // Expected output: rows interleaved per channel (input0 rows, input1 rows, input2 rows).
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5530
5531LayerTestResult<float, 4> Concatenation4dDim2Test(
5532 armnn::IWorkloadFactory& workloadFactory,
5533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5534{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005535 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005536}
5537
// Concatenation of the three shared { 1, 3, 2, 2 } inputs along dimension 3
// (width), producing a { 1, 3, 2, 6 } output: each output row is the matching
// row of input0, input1 and input2 side by side. Unlike the other dimensions,
// 'useSubtensor' is caller-controlled here.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);

    // Expected output: per-row interleaving of the three inputs' widths.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
5576
5577LayerTestResult<float, 4> Concatenation4dDim3Test(
5578 armnn::IWorkloadFactory& workloadFactory,
5579 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5580 bool useSubtensor)
5581{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005582 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
5583 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00005584}
5585
// Concatenation along dimension 0 (batch) of two inputs with DIFFERENT batch
// sizes: { 1, 3, 2, 2 } + { 2, 3, 2, 2 } -> { 3, 3, 2, 2 }. Only the
// concatenation axis may differ between inputs.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: the single batch of input0 followed by both batches of input1.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5665
5666LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
5667 armnn::IWorkloadFactory& workloadFactory,
5668 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5669{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005670 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
5671 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005672}
5673
// Concatenation along dimension 1 (channels) of two inputs with DIFFERENT
// channel counts: { 1, 3, 2, 2 } + { 1, 2, 2, 2 } -> { 1, 5, 2, 2 }.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 1;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,

    }));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: input0's 3 channels followed by input1's 2 channels.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f
    }));

    return result;
}
5734
5735LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
5736 armnn::IWorkloadFactory& workloadFactory,
5737 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5738{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005739 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
5740 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005741}
5742
// Concatenation along dimension 2 (height) of two inputs with DIFFERENT
// heights: { 1, 3, 2, 2 } + { 1, 3, 3, 2 } -> { 1, 3, 5, 2 }.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: per channel, input0's 2 rows followed by input1's 3 rows.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
5814
5815LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
5816 armnn::IWorkloadFactory& workloadFactory,
5817 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5818{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005819 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
5820 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005821}
5822
// Concatenation along dimension 3 (width) of two inputs with DIFFERENT
// widths: { 1, 3, 2, 2 } + { 1, 3, 2, 3 } -> { 1, 3, 2, 5 }.
// 'useSubtensor' is forwarded to Concatenate().
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    unsigned int dimension = 3;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f, 13.0f,
        14.0f, 15.0f, 16.0f,

        17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f,

        23.0f, 24.0f, 25.0f,
        26.0f, 27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: each row is input0's 2 values followed by input1's 3 values.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
    }));

    return result;
}
5883
5884LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
5885 armnn::IWorkloadFactory& workloadFactory,
5886 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5887 bool useSubtensor)
5888{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005889 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
5890 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005891}
5892
// Runs a FakeQuantization workload over a 3x2 Float32 tensor with the range
// [-10, 10] and checks the inputs map onto the 0..255 quantized grid
// (e.g. -10 -> 0, 0 -> 128, 10 -> 255).
LayerTestResult<float, 2> FakeQuantizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int width = 2;
    constexpr unsigned int height = 3;

    // Input and output share the same shape and type.
    const armnn::TensorInfo tensorInfo({height, width },
        armnn::DataType::Float32);
    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
       -10.0f, -5.0f,
         0.0f, 5.0f,
        10.0f, 10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
    // Quantization range covered by the fake-quantization step.
    float min = -10.f;
    float max = 10.f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;

    // NOTE(review): refHandle/refData/refInfo are prepared here but no
    // reference workload is ever created or executed from them, and
    // ret.outputExpected is overwritten below anyway — looks like leftover
    // scaffolding; confirm before removing.
    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    // Expected quantized grid values for the inputs above.
    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        0.0f, 63.0f,
        128.0f, 191.0f,
        255.0f, 255.0f
    }));
    return ret;
}
5949
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005950namespace
5951{
// Shared implementation for the L2 Normalization tests.
// 'inputValues'/'expectedOutputValues' are supplied in NCHW order and are
// permuted here to NHWC when 'layout' requires it. Input and output are
// quantized independently (scale/offset vs outScale/outOffset); 'epsilon'
// is forwarded to the workload descriptor's m_Eps (default 1e-12f).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,
    int32_t offset,
    const std::vector<float>& inputValues,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    // Quantize the (possibly permuted) float input into T.
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
                                                         inputTensorInfo.GetQuantizationScale(),
                                                         inputTensorInfo.GetQuantizationOffset(),
                                                         inputData));

    // Permute the reference output the same way as the input.
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
                                                               outputTensorInfo.GetQuantizationScale(),
                                                               outputTensorInfo.GetQuantizationOffset(),
                                                               expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
6024
6025float CalcInvL2Norm(std::initializer_list<float> elements)
6026{
6027 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
6028 [](float acc, float element) { return acc + element * element; });
6029 return 1.0f / sqrtf(reduction);
6030}
6031
6032} // anonymous namespace
6033
// Pads a 3x3 input to 7x7 with a 2-element border on every side and compares
// against a hard-coded expected table.
// NOTE(review): customPaddingValue is only used to SELECT between the two
// expected tables below (all-0s vs all-1s); it is never written into
// descriptor.m_Parameters, so the test presumably relies on the backend's
// default pad value. For any non-zero value other than 1 the all-1s table
// would not match — confirm how the pad value reaches the workload.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Pad2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue = 0)
{
    const armnn::TensorShape inputShape{ 3, 3 };
    const armnn::TensorShape outputShape{ 7, 7 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
    QuantizedVector<T>(qScale, qOffset,
    {
      // Height (3) x Width (3)
      4, 8, 6,
      7, 4, 4,
      3, 2, 4
    }));

    // Convert the requested padding value into the block's data type so it can
    // be compared against 0 below.
    const T padValue = ConvertToDataType<T>(customPaddingValue, inputTensorInfo);

    std::vector<T> expectedOutputValues;
    if (padValue == 0)
    {
        expectedOutputValues = (
        QuantizedVector<T>(qScale, qOffset,
        {
          0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0,
          0, 0, 4, 8, 6, 0, 0,
          0, 0, 7, 4, 4, 0, 0,
          0, 0, 3, 2, 4, 0, 0,
          0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0
        }));
    }
    else
    {
        expectedOutputValues = (
        QuantizedVector<T>(qScale, qOffset,
        {
          1, 1, 1, 1, 1, 1, 1,
          1, 1, 1, 1, 1, 1, 1,
          1, 1, 4, 8, 6, 1, 1,
          1, 1, 7, 4, 4, 1, 1,
          1, 1, 3, 2, 4, 1, 1,
          1, 1, 1, 1, 1, 1, 1,
          1, 1, 1, 1, 1, 1, 1
        }));
    }


    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // Two elements of padding before and after each of the two dimensions.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006124
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006125template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006126LayerTestResult<T, 3> Pad3dTestCommon(
6127 armnn::IWorkloadFactory& workloadFactory,
6128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6129 float qScale,
6130 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006131{
6132 const armnn::TensorShape inputShape{ 2, 2, 2 };
6133 const armnn::TensorShape outputShape{ 3, 5, 6 };
6134
David Monahan34757812019-06-19 11:47:21 +01006135 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6136 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006137
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006138 std::vector<T> inputValues(
6139 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006140 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006141 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006142 0, 4,
6143 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006144
6145 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006146 6, 1,
6147 5, 2
6148 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006149
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006150 std::vector<T> expectedOutputValues(
6151 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006152 {
6153
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006154 0, 0, 0, 0, 0, 0,
6155 0, 0, 0, 0, 0, 0,
6156 0, 0, 0, 4, 0, 0,
6157 0, 0, 2, 5, 0, 0,
6158 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006159
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006160 0, 0, 0, 0, 0, 0,
6161 0, 0, 0, 0, 0, 0,
6162 0, 0, 6, 1, 0, 0,
6163 0, 0, 5, 2, 0, 0,
6164 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006165
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006166 0, 0, 0, 0, 0, 0,
6167 0, 0, 0, 0, 0, 0,
6168 0, 0, 0, 0, 0, 0,
6169 0, 0, 0, 0, 0, 0,
6170 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006171
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006172 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006173
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006174 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006175
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006176 LayerTestResult<T, 3> result(outputTensorInfo);
6177 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006178
6179 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6180 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6181
6182 armnn::PadQueueDescriptor descriptor;
6183
6184 std::vector<std::pair<unsigned int, unsigned int>> PadList;
6185 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
6186 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6187 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6188
6189 descriptor.m_Parameters.m_PadList = PadList;
6190 armnn::WorkloadInfo info;
6191
6192 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6193 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6194
6195 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6196
6197 inputHandle->Allocate();
6198 outputHandle->Allocate();
6199
6200 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
6201
Derek Lambertif30f7d32019-04-09 10:25:02 +01006202 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006203 workload->Execute();
6204
6205 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
6206
6207 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006208}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006209
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006210template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006211LayerTestResult<T, 4> Pad4dTestCommon(
6212 armnn::IWorkloadFactory& workloadFactory,
6213 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6214 float qScale,
6215 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006216{
6217 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
6218 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
6219
David Monahan34757812019-06-19 11:47:21 +01006220 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6221 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006222
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006223 std::vector<T> inputValues(
6224 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006225 {
6226 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006227 0, 1,
6228 2, 3,
6229 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006230
6231 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006232 6, 7,
6233 8, 9,
6234 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006235
6236 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006237 12, 13,
6238 14, 15,
6239 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006240
6241 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006242 18, 19,
6243 20, 21,
6244 22, 23
6245 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006246
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006247 std::vector<T> expectedOutputValues(
6248 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006249 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006250 0, 0, 0, 0,
6251 0, 0, 0, 0,
6252 0, 0, 0, 0,
6253 0, 0, 0, 0,
6254 0, 0, 0, 0,
6255 0, 0, 0, 0,
6256 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006257
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006258 0, 0, 0, 0,
6259 0, 0, 0, 0,
6260 0, 0, 0, 0,
6261 0, 0, 0, 0,
6262 0, 0, 0, 0,
6263 0, 0, 0, 0,
6264 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006265
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006266 0, 0, 0, 0,
6267 0, 0, 0, 0,
6268 0, 0, 0, 0,
6269 0, 0, 0, 0,
6270 0, 0, 0, 0,
6271 0, 0, 0, 0,
6272 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006273
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006274 0, 0, 0, 0,
6275 0, 0, 0, 0,
6276 0, 0, 0, 0,
6277 0, 0, 0, 0,
6278 0, 0, 0, 0,
6279 0, 0, 0, 0,
6280 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006281
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006282 0, 0, 0, 0,
6283 0, 0, 0, 0,
6284 0, 0, 0, 0,
6285 0, 0, 0, 0,
6286 0, 0, 0, 0,
6287 0, 0, 0, 0,
6288 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006289
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006290 0, 0, 0, 0,
6291 0, 0, 0, 0,
6292 0, 0, 0, 0,
6293 0, 0, 0, 0,
6294 0, 0, 0, 0,
6295 0, 0, 0, 0,
6296 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006297
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006298 0, 0, 0, 0,
6299 0, 0, 0, 0,
6300 0, 0, 0, 0,
6301 0, 0, 0, 0,
6302 0, 0, 0, 0,
6303 0, 0, 0, 0,
6304 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006305
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006306 0, 0, 0, 0,
6307 0, 0, 0, 0,
6308 0, 0, 0, 0,
6309 0, 0, 1, 0,
6310 0, 2, 3, 0,
6311 0, 4, 5, 0,
6312 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006313
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006314 0, 0, 0, 0,
6315 0, 0, 0, 0,
6316 0, 0, 0, 0,
6317 0, 6, 7, 0,
6318 0, 8, 9, 0,
6319 0, 10, 11, 0,
6320 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006321
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006322 0, 0, 0, 0,
6323 0, 0, 0, 0,
6324 0, 0, 0, 0,
6325 0, 0, 0, 0,
6326 0, 0, 0, 0,
6327 0, 0, 0, 0,
6328 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006329
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006330 0, 0, 0, 0,
6331 0, 0, 0, 0,
6332 0, 0, 0, 0,
6333 0, 0, 0, 0,
6334 0, 0, 0, 0,
6335 0, 0, 0, 0,
6336 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006337
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006338 0, 0, 0, 0,
6339 0, 0, 0, 0,
6340 0, 0, 0, 0,
6341 0, 0, 0, 0,
6342 0, 0, 0, 0,
6343 0, 0, 0, 0,
6344 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006345
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006346 0, 0, 0, 0,
6347 0, 0, 0, 0,
6348 0, 0, 0, 0,
6349 0, 12, 13, 0,
6350 0, 14, 15, 0,
6351 0, 16, 17, 0,
6352 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006353
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006354 0, 0, 0, 0,
6355 0, 0, 0, 0,
6356 0, 0, 0, 0,
6357 0, 18, 19, 0,
6358 0, 20, 21, 0,
6359 0, 22, 23, 0,
6360 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006361
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006362 0, 0, 0, 0,
6363 0, 0, 0, 0,
6364 0, 0, 0, 0,
6365 0, 0, 0, 0,
6366 0, 0, 0, 0,
6367 0, 0, 0, 0,
6368 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006369
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006370 0, 0, 0, 0,
6371 0, 0, 0, 0,
6372 0, 0, 0, 0,
6373 0, 0, 0, 0,
6374 0, 0, 0, 0,
6375 0, 0, 0, 0,
6376 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006377
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006378 0, 0, 0, 0,
6379 0, 0, 0, 0,
6380 0, 0, 0, 0,
6381 0, 0, 0, 0,
6382 0, 0, 0, 0,
6383 0, 0, 0, 0,
6384 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006385
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006386 0, 0, 0, 0,
6387 0, 0, 0, 0,
6388 0, 0, 0, 0,
6389 0, 0, 0, 0,
6390 0, 0, 0, 0,
6391 0, 0, 0, 0,
6392 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006393
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006394 0, 0, 0, 0,
6395 0, 0, 0, 0,
6396 0, 0, 0, 0,
6397 0, 0, 0, 0,
6398 0, 0, 0, 0,
6399 0, 0, 0, 0,
6400 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006401
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006402 0, 0, 0, 0,
6403 0, 0, 0, 0,
6404 0, 0, 0, 0,
6405 0, 0, 0, 0,
6406 0, 0, 0, 0,
6407 0, 0, 0, 0,
6408 0, 0, 0, 0
6409 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006410
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006411 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006412
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006413 LayerTestResult<T, 4> result(outputTensorInfo);
6414 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006415
6416 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6417 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6418
6419 armnn::PadQueueDescriptor descriptor;
6420
6421 std::vector<std::pair<unsigned int, unsigned int>> PadList;
6422 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6423 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6424 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
6425 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6426
6427 descriptor.m_Parameters.m_PadList = PadList;
6428 armnn::WorkloadInfo info;
6429
6430 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6431 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6432
6433 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6434
6435 inputHandle->Allocate();
6436 outputHandle->Allocate();
6437
6438 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6439
Derek Lambertif30f7d32019-04-09 10:25:02 +01006440 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006441 workload->Execute();
6442
6443 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6444
6445 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006446}
6447
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006448LayerTestResult<uint8_t, 2> PadUint82dTest(
6449 armnn::IWorkloadFactory& workloadFactory,
6450 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006451{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006452 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006453}
6454
David Monahan34757812019-06-19 11:47:21 +01006455LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
6456 armnn::IWorkloadFactory& workloadFactory,
6457 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6458{
6459 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
6460}
6461
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006462LayerTestResult<uint8_t, 3> PadUint83dTest(
6463 armnn::IWorkloadFactory& workloadFactory,
6464 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006465{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006466 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006467}
6468
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006469LayerTestResult<uint8_t, 4> PadUint84dTest(
6470 armnn::IWorkloadFactory& workloadFactory,
6471 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006472{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006473 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006474}
6475
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006476LayerTestResult<float, 2> PadFloat322dTest(
6477 armnn::IWorkloadFactory& workloadFactory,
6478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006479{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006480 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006481}
6482
David Monahan34757812019-06-19 11:47:21 +01006483LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
6484 armnn::IWorkloadFactory& workloadFactory,
6485 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6486{
6487 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
6488}
6489
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006490LayerTestResult<float, 3> PadFloat323dTest(
6491 armnn::IWorkloadFactory& workloadFactory,
6492 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006493{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006494 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006495}
6496
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006497LayerTestResult<float, 4> PadFloat324dTest(
6498 armnn::IWorkloadFactory& workloadFactory,
6499 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006500{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006501 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006502}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006503
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006504template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguere52211e2019-06-17 12:23:52 +01006505LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
6506 armnn::IWorkloadFactory& workloadFactory,
6507 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6508 float scale,
6509 int32_t offset,
6510 float outScale,
6511 int32_t outOffset,
6512 const armnn::DataLayout layout,
6513 float epsilon)
6514{
6515 // Width: 1
6516 // Height: 1
6517 // Channels: 3
6518 // BatchSize: 1
6519 unsigned int numberOfBatches = 1;
6520 unsigned int numberOfChannels = 3;
6521 unsigned int height = 1;
6522 unsigned int width = 1;
6523
6524 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
6525 numberOfBatches, numberOfChannels, height, width, layout);
6526
6527 // 0.0000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
6528 std::vector<float> inputValues
6529 {
6530 // Batch 0, Channel 0, Height (1) x Width (1)
6531 0.00000001f,
6532
6533 // Batch 0, Channel 1, Height (1) x Width (1)
6534 0.00000002f,
6535
6536 // Batch 0, Channel 2, Height (1) x Width (1)
6537 0.00000003f,
6538 };
6539
6540 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
6541 std::vector<float> expectedOutputValues
6542 {
6543 // Batch 0, Channel 0, Height (1) x Width (1)
6544 0.00000001f * approxInvL2Norm,
6545 0.00000002f * approxInvL2Norm,
6546 0.00000003f * approxInvL2Norm,
6547 };
6548
6549 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6550 inputValues, outScale, outOffset, expectedOutputValues, layout,
6551 epsilon);
6552}
6553
6554
6555template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006556LayerTestResult<T, 4> L2Normalization1dTestCommon(
6557 armnn::IWorkloadFactory& workloadFactory,
6558 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006559 float scale,
6560 int32_t offset,
6561 float outScale,
6562 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006563 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006564{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006565 // Width: 1
6566 // Height: 1
6567 // Channels: 10
6568 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006569 unsigned int numberOfBatches = 1;
6570 unsigned int numberOfChannels = 10;
6571 unsigned int height = 1;
6572 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00006573
jimfly013aab7c32018-11-12 13:32:08 +00006574
Nina Drozdd41b2592018-11-19 13:03:36 +00006575 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006576 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006577 std::vector<float> inputValues
6578 {
6579 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006580 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00006581
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006582 // Batch 0, Channel 1, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006583 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00006584
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006585 // Batch 0, Channel 2, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006586 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00006587
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006588 // Batch 0, Channel 3, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006589 4.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006590
6591 // Batch 0, Channel 4, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006592 5.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006593
6594 // Batch 0, Channel 5, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006595 6.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006596
6597 // Batch 0, Channel 6, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006598 7.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006599
6600 // Batch 0, Channel 7, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006601 8.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006602
6603 // Batch 0, Channel 8, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006604 9.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006605
6606 // Batch 0, Channel 9, Height (1) x Width (1)
6607 10.0f
6608 };
telsoa014fcda012018-03-09 14:13:49 +00006609 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006610 std::vector<float> expectedOutputValues
6611 {
6612 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006613 1.0f * approxInvL2Norm,
6614 2.0f * approxInvL2Norm,
6615 3.0f * approxInvL2Norm,
6616 4.0f * approxInvL2Norm,
6617 5.0f * approxInvL2Norm,
6618 6.0f * approxInvL2Norm,
6619 7.0f * approxInvL2Norm,
6620 8.0f * approxInvL2Norm,
6621 9.0f * approxInvL2Norm,
telsoa014fcda012018-03-09 14:13:49 +00006622 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006623 };
telsoa014fcda012018-03-09 14:13:49 +00006624
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006625
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006626 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6627 inputValues, outScale, outOffset, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006628}
6629
Ferran Balaguere52211e2019-06-17 12:23:52 +01006630LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
6631 armnn::IWorkloadFactory& workloadFactory,
6632 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6633 const armnn::DataLayout layout)
6634{
6635 // Dummy descriptor to get the default value of epsilon.
6636 armnn::L2NormalizationDescriptor descriptor;
6637
6638 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6639 layout, descriptor.m_Eps);
6640}
6641
6642LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
6643 armnn::IWorkloadFactory& workloadFactory,
6644 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6645 const armnn::DataLayout layout)
6646{
6647 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6648 layout, 1e-9f);
6649}
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006650
6651LayerTestResult<float, 4> L2Normalization1dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006652 armnn::IWorkloadFactory& workloadFactory,
6653 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006654 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006655{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006656 return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006657}
6658
6659LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
6660 armnn::IWorkloadFactory& workloadFactory,
6661 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6662 const armnn::DataLayout layout)
6663{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006664 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006665 layout);
6666}
6667
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006668LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
6669 armnn::IWorkloadFactory& workloadFactory,
6670 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6671 const armnn::DataLayout layout)
6672{
6673 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6674 1.f/128, 128, layout);
6675}
6676
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006677template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6678LayerTestResult<T, 4> L2Normalization2dTestCommon(
6679 armnn::IWorkloadFactory& workloadFactory,
6680 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006681 float scale,
6682 int32_t offset,
6683 float outScale,
6684 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006685 const armnn::DataLayout layout)
6686{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006687 // Width: 5
6688 // Height: 1
6689 // Channels: 2
6690 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006691 unsigned int numberOfBatches = 1;
6692 unsigned int numberOfChannels = 2;
6693 unsigned int height = 1;
6694 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00006695
Nina Drozdd41b2592018-11-19 13:03:36 +00006696 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006697 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006698 std::vector<float> inputValues
6699 {
6700 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00006701 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00006702
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006703 // Batch 0, Channel 1, Height (1) x Width (5)
6704 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
6705 };
6706 std::vector<float> expectedOutputValues
6707 {
6708 // Batch 0, Channel 0, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006709 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6710 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6711 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6712 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
6713 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006714
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006715 // Batch 0, Channel 1, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006716 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6717 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6718 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6719 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006720 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006721 };
telsoa014fcda012018-03-09 14:13:49 +00006722
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006723 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6724 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006725}
telsoa014fcda012018-03-09 14:13:49 +00006726
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006727LayerTestResult<float, 4> L2Normalization2dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006728 armnn::IWorkloadFactory& workloadFactory,
6729 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006730 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006731{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006732 return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6733 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006734}
6735
6736LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
6737 armnn::IWorkloadFactory& workloadFactory,
6738 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6739 const armnn::DataLayout layout)
6740{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006741 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006742 layout);
6743}
6744
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006745LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
6746 armnn::IWorkloadFactory& workloadFactory,
6747 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6748 const armnn::DataLayout layout)
6749{
6750 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6751 1.f/128, 128, layout);
6752}
6753
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006754template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6755LayerTestResult<T, 4> L2Normalization3dTestCommon(
6756 armnn::IWorkloadFactory& workloadFactory,
6757 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006758 float scale,
6759 int32_t offset,
6760 float outScale,
6761 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006762 const armnn::DataLayout layout)
6763{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006764 // Width: 3
6765 // Height: 4
6766 // Channels: 2
6767 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006768 unsigned int numberOfBatches = 1;
6769 unsigned int numberOfChannels = 2;
6770 unsigned int height = 4;
6771 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006772
Nina Drozdd41b2592018-11-19 13:03:36 +00006773 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006774 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006775 std::vector<float> inputValues
6776 {
6777 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006778 119.0f, 21.0f, 150.0f,
6779 149.0f, 32.0f, 179.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006780 15.0f, 227.0f, 141.0f,
telsoa014fcda012018-03-09 14:13:49 +00006781 147.0f, 199.0f, 220.0f,
6782
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006783 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006784 110.0f, 140.0f, 73.0f,
6785 211.0f, 212.0f, 89.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006786 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006787 162.0f, 12.0f, 161.0f
6788 };
6789 std::vector<float> expectedOutputValues
6790 {
6791 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006792 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006793 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006794 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
6795 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006796 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006797 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006798 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006799 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6800 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6801 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6802 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
6803 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
6804
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006805 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006806 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
6807 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006808 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006809 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
6810 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006811 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
6812 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006813 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6814 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6815 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006816 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006817 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
6818 };
telsoa014fcda012018-03-09 14:13:49 +00006819
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006820 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6821 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006822}
telsoa014fcda012018-03-09 14:13:49 +00006823
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006824LayerTestResult<float, 4> L2Normalization3dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006825 armnn::IWorkloadFactory& workloadFactory,
6826 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006827 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006828{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006829 return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6830 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006831}
6832
6833LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
6834 armnn::IWorkloadFactory& workloadFactory,
6835 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6836 const armnn::DataLayout layout)
6837{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006838 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006839 layout);
6840}
6841
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006842LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
6843 armnn::IWorkloadFactory& workloadFactory,
6844 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6845 const armnn::DataLayout layout)
6846{
6847 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6848 1.f/128, 128, layout);
6849}
6850
// Shared driver for the 4d L2Normalization tests. Builds a [2, 3, 4, 3]
// (N, C, H, W) input, computes the expected output by hand as
// value * CalcInvL2Norm(values across the channel axis), and delegates
// execution/verification to L2NormalizationTestImpl.
//
// scale/offset     - quantization parameters applied to the input tensor.
// outScale/outOffset - quantization parameters applied to the output tensor.
// layout           - NCHW or NHWC; the shape is derived via GetTensorShape.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2
    unsigned int numberOfBatches = 2;
    unsigned int numberOfChannels = 3;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    // Input values are listed in NCHW order regardless of 'layout';
    // L2NormalizationTestImpl handles any permutation required for NHWC.
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f
    };
    // Each expected value is the input value scaled by the inverse L2 norm of
    // the three values sharing its (batch, height, width) position across the
    // channel dimension.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
         74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
         77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
         82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
        170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
         89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
        254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
         12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
         90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
         49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
          7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
         18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
         25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
         59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
         17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
         75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
         16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
}
7000
7001LayerTestResult<float, 4> L2Normalization4dTest(
7002 armnn::IWorkloadFactory& workloadFactory,
7003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7004 const armnn::DataLayout layout)
7005{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007006 return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7007 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007008}
7009
7010LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
7011 armnn::IWorkloadFactory& workloadFactory,
7012 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7013 const armnn::DataLayout layout)
7014{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007015 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007016 layout);
telsoa014fcda012018-03-09 14:13:49 +00007017}
7018
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007019LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
7020 armnn::IWorkloadFactory& workloadFactory,
7021 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7022 const armnn::DataLayout layout)
7023{
7024 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7025 1.f/128, 128, layout);
7026}
7027
// Verifies the Constant workload: a [2, 3, 4, 3] (N, C, H, W) tensor is baked
// into the layer as constant data, the workload is executed with no inputs,
// and the output is expected to reproduce the constant data exactly.
//
// qScale/qOffset - quantization parameters used both to quantize the float
//                  fixture data and to describe the tensors when ArmnnType is
//                  a quantized type.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    // A Constant layer copies its payload through unchanged, so the output
    // dimensions mirror the input dimensions exactly.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                        ArmnnType, qScale, qOffset);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                         ArmnnType, qScale, qOffset);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    // The constant's payload should come back out untouched.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The constant data is owned by the descriptor (m_LayerOutput), not passed
    // in as a workload input - Constant workloads have no inputs.
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    // PostAllocationConfigure must run after Allocate and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7123
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007124LayerTestResult<float, 4> ConstantTest(
7125 armnn::IWorkloadFactory& workloadFactory,
7126 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007127{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007128 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007129}
7130
Nina Drozd58ef2c62019-05-16 12:09:18 +01007131LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
7132 armnn::IWorkloadFactory& workloadFactory,
7133 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7134{
7135 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
7136}
7137
7138LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007139 armnn::IWorkloadFactory& workloadFactory,
7140 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007141{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007142 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007143}
7144
// Verifies the Concat workload on uint8 data where the two inputs carry
// DIFFERENT quantization parameters. Input1 shares its scale/offset with the
// output, so only input2 must be requantized during the concatenation; the
// expected output encodes that requantization by hand.
LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors. Concatenation is along the channel
    // dimension: 2 channels + 1 channel -> 3 channels.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Quantized input1 tensor. Range [-3, 1]
    const float inputScale1 = 0.015686f;
    const int32_t inputOffset1 = 192;

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Quantized input2 tensor. Range [-1, 4]
    const float inputScale2 = 0.019608f;
    const int32_t inputOffset2 = 50;

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // Output has the same quantization parameters as input1,
    // so that only the requantization of input2 is required.
    const float outputScale = 0.015686f;
    const int32_t outputOffset = 192;

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // First two channel slabs pass through unchanged (same qparams as output);
    // the third slab is input2 requantized from scale2/offset2 to
    // outputScale/outputOffset.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        176, 177, 178,
        179, 181, 182,
        183, 184, 186,
        187, 188, 189,
        191, 192, 193,
        195, 196, 197,
    })
    );

    outputTensorInfo.SetQuantizationScale(outputScale);
    outputTensorInfo.SetQuantizationOffset(outputOffset);
    inputTensorInfo1.SetQuantizationScale(inputScale1);
    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
    inputTensorInfo2.SetQuantizationScale(inputScale2);
    inputTensorInfo2.SetQuantizationOffset(inputOffset2);

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Where the backend supports sub-tensors, the inputs are created as views
    // directly into the output tensor at each window origin.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    // PostAllocationConfigure must run after Allocate and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
7287
// Verifies the Concat workload on uint8 data where both inputs and the output
// share the same quantization parameters, so concatenation is a raw copy of
// the quantized values (no requantization involved).
LayerTestResult<uint8_t, 3> ConcatUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors. Concatenation is along the channel
    // dimension: 2 channels + 1 channel -> 3 channels.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected output is simply input1's two channel slabs followed by
    // input2's single slab.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Where the backend supports sub-tensors, the inputs are created as views
    // directly into the output tensor at each window origin.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    // PostAllocationConfigure must run after Allocate and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
7423
// Concatenates two QuantisedSymm16 3D tensors (2 channels and 1 channel) along
// the channel axis into a 3-channel output, and returns the computed result
// together with the expected concatenation for comparison.
LayerTestResult<uint16_t, 3> ConcatUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output is input1 (2 channels) stacked on input2 (1 channel); all share
    // the same 6x3 spatial extent.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    // All three tensors share the same quantization parameters, so Concat is
    // pure data movement here.
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint16_t, 3> ret(outputTensorInfo);

    // Expected output: channels of input1 followed by the channel of input2.
    ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    }));

    auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    // View origins place input1 at channel 0 and input2 at channel 2 of the output.
    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // Backends that support sub-tensors write the inputs directly into the
    // output buffer; otherwise standalone input handles are created.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
telsoa014fcda012018-03-09 14:13:49 +00007556
namespace
{
// Runs a single Addition workload on two quantized 4D input tensors and
// returns the computed output alongside the caller-supplied expected values.
//
// T selects the element type: uint8_t maps to QuantisedAsymm8, any other T
// maps to QuantisedSymm16 (see the ternary below).  shapeN/valuesN describe
// each input and scaleN/offsetN its quantization; outShape/outValues/outScale/
// outOffset describe the expected output.  memoryManager is not used by this
// helper.
template <typename T>
LayerTestResult<T, 4> AdditionQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Map the C++ element type to the armnn quantized data type.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::QuantisedSymm16);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate backing memory, upload the inputs, then run the workload.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7626
7627LayerTestResult<uint8_t, 4> AdditionUint8Test(
7628 armnn::IWorkloadFactory& workloadFactory,
7629 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7630{
7631 const unsigned int shape0[] = { 1, 2, 2, 3 };
7632 const unsigned int shape1[] = { 1, 2, 2, 3 };
7633
7634 std::vector<uint8_t> input0(
7635 {
7636 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
7637 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
7638 });
7639
7640 std::vector<uint8_t> input1(
7641 {
7642 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
7643 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
7644 });
7645
7646 std::vector<uint8_t> output(
7647 {
7648 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
7649 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
7650 });
7651
7652 return AdditionQuantizeTestHelper(workloadFactory,
7653 memoryManager,
7654 shape0, input0, 7.0f, 3,
7655 shape1, input1, 7.0f, 3,
7656 shape0, output, 7.0f, 3);
7657}
7658
// Element-wise addition of two QuantisedSymm16 tensors.  All tensors use
// scale 7.0 with the zero offset required by the symmetric scheme, so no
// result saturates (int16 range is far wider than the sums below).
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    // Dequantized values (q * 7) shown to the right.
    std::vector<int16_t> input0(
    {
        63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
        203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
    });

    std::vector<int16_t> input1(
    {
        21, 7, 175, 231, 175, 210, // 147, 49, 1225, 1617, 1225, 1470
        126, 161, 63, 21, 105, 126 // 882, 1127, 441, 147, 735, 882
    });

    std::vector<int16_t> output(
    {
        84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254
        329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
    });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
7690
namespace
{
// Runs a single Multiplication workload on two quantized 4D input tensors and
// returns the computed output alongside the caller-supplied expected values.
//
// ArmnnType fixes the tensor data type and T is the matching C++ element type
// (via ResolveType).  shapeN/valuesN describe each input and scaleN/offsetN
// its quantization; outShape/outValues/outScale/outOffset describe the
// expected output.  memoryManager is not used by this helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Allocate backing memory, upload the inputs, then run the workload.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7756
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007757LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
7758 armnn::IWorkloadFactory& workloadFactory,
7759 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007760{
7761 unsigned int batchSize = 1;
7762 unsigned int channels = 2;
7763 unsigned int height = 2;
7764 unsigned int width = 3;
7765 const unsigned int shape[] = { batchSize, channels, height, width };
7766
telsoa01c577f2c2018-08-31 09:22:23 +01007767 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007768 std::vector<uint8_t> input0({
7769 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
7770 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
7771 });
7772
telsoa01c577f2c2018-08-31 09:22:23 +01007773 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007774 std::vector<uint8_t> input1({
7775 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
7776 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
7777 });
7778
telsoa01c577f2c2018-08-31 09:22:23 +01007779 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007780 std::vector<uint8_t> output(
7781 {
7782 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
7783 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
7784 });
7785
Sadik Armagan2999a022019-04-09 14:20:12 +01007786 // Scale/offset chosen to have output values out of range.
7787 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7788 memoryManager,
7789 shape,
7790 input0,
7791 4.0f,
7792 1,
7793 shape,
7794 input1,
7795 3.0f,
7796 -2,
7797 shape,
7798 output,
7799 1366.255f,
7800 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01007801}
7802
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007803LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
7804 armnn::IWorkloadFactory& workloadFactory,
7805 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007806{
7807 const unsigned int shape0[] = { 1, 2, 2, 3 };
7808 const unsigned int shape1[] = { 1, 1, 1, 1 };
7809
7810 std::vector<uint8_t> input0({
7811 1, 2, 3, 4, 5, 6,
7812 7, 8, 9, 10, 11, 12
7813 });
7814
7815 std::vector<uint8_t> input1({2});
7816
7817 std::vector<uint8_t> output({
7818 2, 4, 6, 8, 10, 12,
7819 14, 16, 18, 20, 22, 24
7820 });
7821
Sadik Armagan2999a022019-04-09 14:20:12 +01007822 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7823 memoryManager,
7824 shape0,
7825 input0,
7826 1.0f,
7827 0,
7828 shape1,
7829 input1,
7830 1.0f,
7831 0,
7832 shape0,
7833 output,
7834 1.0f,
7835 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007836}
7837
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007838LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7839 armnn::IWorkloadFactory& workloadFactory,
7840 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007841{
7842 const unsigned int shape0[] = { 1, 2, 2, 3 };
7843 const unsigned int shape1[] = { 1, 1, 1, 3 };
7844
7845 std::vector<uint8_t> input0({
7846 1, 2, 3, 4, 5, 6,
7847 7, 8, 9, 10, 11, 12
7848 });
7849
7850 std::vector<uint8_t> input1({1, 2, 3});
7851
7852 std::vector<uint8_t> output({
7853 1, 4, 9, 4, 10, 18,
7854 7, 16, 27, 10, 22, 36
7855 });
7856
Sadik Armagan2999a022019-04-09 14:20:12 +01007857 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7858 memoryManager,
7859 shape0,
7860 input0,
7861 1.0f,
7862 0,
7863 shape1,
7864 input1,
7865 1.0f,
7866 0,
7867 shape0,
7868 output,
7869 1.0f,
7870 0);
7871}
7872
7873LayerTestResult<int16_t, 4> MultiplicationInt16Test(
7874 armnn::IWorkloadFactory& workloadFactory,
7875 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7876{
7877 const unsigned int shape[] = { 1, 2, 2, 3 };
7878
7879 std::vector<int16_t> input0(
7880 {
7881 6, 7, 8, 9, 10, 11,
7882 12, 13, 14, 15, 16, 17
7883 });
7884
7885 std::vector<int16_t> input1(
7886 {
7887 1, 2, 3, 4, 5, 6,
7888 7, 8, 9, 10, 11, 12
7889 });
7890
7891 std::vector<int16_t> output(
7892 {
7893 6, 14, 24, 36, 50, 66,
7894 84, 104, 126, 150, 176, 204
7895 });
7896
7897 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7898 memoryManager,
7899 shape,
7900 input0,
7901 1.0f,
7902 0,
7903 shape,
7904 input1,
7905 1.0f,
7906 0,
7907 shape,
7908 output,
7909 1.0f,
7910 0);
7911}
7912
7913LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
7914 armnn::IWorkloadFactory& workloadFactory,
7915 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7916{
7917 const unsigned int shape0[] = { 1, 2, 2, 3 };
7918 const unsigned int shape1[] = { 1, 1, 1, 1 };
7919
7920 std::vector<int16_t> input0(
7921 {
7922 1, 2, 3, 4, 5, 6,
7923 7, 8, 9, 10, 11, 12
7924 });
7925
7926 std::vector<int16_t> input1({2});
7927
7928 std::vector<int16_t> output(
7929 {
7930 2, 4, 6, 8, 10, 12,
7931 14, 16, 18, 20, 22, 24
7932 });
7933
7934 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7935 memoryManager,
7936 shape0,
7937 input0,
7938 1.0f,
7939 0,
7940 shape1,
7941 input1,
7942 1.0f,
7943 0,
7944 shape0,
7945 output,
7946 1.0f,
7947 0);
7948}
7949
7950LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
7951 armnn::IWorkloadFactory& workloadFactory,
7952 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7953{
7954 const unsigned int shape0[] = { 1, 2, 2, 3 };
7955 const unsigned int shape1[] = { 1, 1, 1, 3 };
7956
7957 std::vector<int16_t> input0(
7958 {
7959 1, 2, 3, 4, 5, 6,
7960 7, 8, 9, 10, 11, 12
7961 });
7962
7963 std::vector<int16_t> input1({1, 2, 3});
7964
7965 std::vector<int16_t> output(
7966 {
7967 1, 4, 9, 4, 10, 18,
7968 7, 16, 27, 10, 22, 36
7969 });
7970
7971 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7972 memoryManager,
7973 shape0,
7974 input0,
7975 1.0f,
7976 0,
7977 shape1,
7978 input1,
7979 1.0f,
7980 0,
7981 shape0,
7982 output,
7983 1.0f,
7984 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007985}
telsoa014fcda012018-03-09 14:13:49 +00007986
namespace
{
// Runs a single Subtraction workload (values0 - values1, element-wise with
// broadcasting) and returns the computed output alongside the caller-supplied
// expected values.
//
// ArmnnType fixes the tensor data type and T is the matching C++ element type
// (via ResolveType).  shapeN/valuesN describe each input and scaleN/offsetN
// its quantization; outShape/outValues/outScale/outOffset describe the
// expected output.  memoryManager is not used by this helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    // Allocate backing memory, upload the inputs, then run the workload.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
8052
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008053LayerTestResult<uint8_t, 4> SubtractionUint8Test(
8054 armnn::IWorkloadFactory& workloadFactory,
8055 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008056{
8057 const unsigned int shape0[] = { 1, 1, 2, 2 };
8058 const unsigned int shape1[] = { 1, 1, 2, 2 };
8059
8060 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8061 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
8062 std::vector<uint8_t> output({ 3, 3, 5, 5 });
8063
Sadik Armagan2999a022019-04-09 14:20:12 +01008064 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8065 memoryManager,
8066 shape0, input0, 0.5f, 2,
8067 shape1, input1, 1.0f, 0,
8068 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008069}
8070
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008071LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
8072 armnn::IWorkloadFactory& workloadFactory,
8073 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008074{
8075 const unsigned int shape0[] = { 1, 1, 2, 2 };
8076 const unsigned int shape1[] = { 1, 1, 1, 1 };
8077
8078 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8079 std::vector<uint8_t> input1({ 2 });
8080 std::vector<uint8_t> output({ 5, 6, 7, 8 });
8081
Sadik Armagan2999a022019-04-09 14:20:12 +01008082 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8083 memoryManager,
8084 shape0, input0, 0.5f, 2,
8085 shape1, input1, 1.0f, 0,
8086 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01008087}
8088
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008089LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
8090 armnn::IWorkloadFactory& workloadFactory,
8091 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008092{
8093 const unsigned int shape0[] = { 1, 1, 2, 2 };
8094 const unsigned int shape1[] = { 1, 1, 2, 1 };
8095
8096 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8097 std::vector<uint8_t> input1({ 2, 1 });
8098 std::vector<uint8_t> output({ 8, 11, 12, 15 });
8099
Sadik Armagan2999a022019-04-09 14:20:12 +01008100 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8101 memoryManager,
8102 shape0, input0, 1.0f, 0,
8103 shape1, input1, 1.0f, 0,
8104 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008105}
8106
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008107LayerTestResult<float, 4> SubtractionTest(
8108 armnn::IWorkloadFactory& workloadFactory,
8109 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008110{
8111 const unsigned int shape0[] = { 1, 1, 2, 2 };
8112 const unsigned int shape1[] = { 1, 1, 2, 2 };
8113
8114 std::vector<float> input0({ 1, 2, 3, 4 });
8115 std::vector<float> input1({ 1, -1, 0, 2 });
8116 std::vector<float> output({ 0, 3, 3, 2 });
8117
Sadik Armagan2999a022019-04-09 14:20:12 +01008118 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8119 memoryManager,
8120 shape0, input0, 1.0f, 0,
8121 shape1, input1, 1.0f, 0,
8122 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008123}
8124
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008125LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
8126 armnn::IWorkloadFactory& workloadFactory,
8127 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008128{
8129 const unsigned int shape0[] = { 1, 1, 2, 2 };
8130 const unsigned int shape1[] = { 1, 1, 1, 1 };
8131
8132 std::vector<float> input0({ 1, 2, 3, 4 });
8133 std::vector<float> input1({ 10 });
8134 std::vector<float> output({ -9, -8, -7, -6 });
8135
Sadik Armagan2999a022019-04-09 14:20:12 +01008136 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8137 memoryManager,
8138 shape0, input0, 1.0f, 0,
8139 shape1, input1, 1.0f, 0,
8140 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008141}
8142
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008143LayerTestResult<float, 4> SubtractionBroadcastTest(
8144 armnn::IWorkloadFactory& workloadFactory,
8145 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008146{
8147 const unsigned int shape0[] = { 1, 1, 2, 2 };
8148 const unsigned int shape1[] = { 1, 1, 1, 2 };
8149
8150 std::vector<float> input0({ 1, 2, 3, 4 });
8151 std::vector<float> input1({ 10, -5 });
8152 std::vector<float> output({ -9, 7, -7, 9 });
8153
Sadik Armagan2999a022019-04-09 14:20:12 +01008154 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8155 memoryManager,
8156 shape0, input0, 1.0f, 0,
8157 shape1, input1, 1.0f, 0,
8158 shape0, output, 1.0f, 0);
8159}
8160
8161LayerTestResult<int16_t, 4> SubtractionInt16Test(
8162 armnn::IWorkloadFactory& workloadFactory,
8163 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8164{
8165 const unsigned int shape0[] = { 1, 1, 2, 2 };
8166 const unsigned int shape1[] = { 1, 1, 2, 2 };
8167
8168 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8169 std::vector<int16_t> input1({ 1, 2, 1, 2 });
8170 std::vector<int16_t> output({ 3, 3, 5, 5 });
8171
8172 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8173 memoryManager,
8174 shape0, input0, 0.5f, 0,
8175 shape1, input1, 1.0f, 0,
8176 shape0, output, 1.0f, 0);
8177}
8178
8179LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
8180 armnn::IWorkloadFactory& workloadFactory,
8181 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8182{
8183 const unsigned int shape0[] = { 1, 1, 2, 2 };
8184 const unsigned int shape1[] = { 1, 1, 1, 1 };
8185
8186 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8187 std::vector<int16_t> input1({ 2 });
8188 std::vector<int16_t> output({ 3, 4, 5, 6 });
8189
8190 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8191 memoryManager,
8192 shape0, input0, 0.5f, 0,
8193 shape1, input1, 1.0f, 0,
8194 shape0, output, 1.0f, 0);
8195}
8196
8197LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
8198 armnn::IWorkloadFactory& workloadFactory,
8199 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8200{
8201 const unsigned int shape0[] = { 1, 1, 2, 2 };
8202 const unsigned int shape1[] = { 1, 1, 2, 1 };
8203
8204 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8205 std::vector<int16_t> input1({ 2, 1 });
8206 std::vector<int16_t> output({ 8, 11, 12, 15 });
8207
8208 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8209 memoryManager,
8210 shape0, input0, 1.0f, 0,
8211 shape1, input1, 1.0f, 0,
8212 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008213}
8214
// Runs batch normalisation on a Float32 NCHW tensor and compares against
// precomputed expected values.  The normalisation parameters themselves are
// set up inside BatchNormTestImpl — presumably mean/variance/beta/gamma that
// leave channel 0 unchanged and shift channel 1; verify against
// BatchNormTestImpl.hpp.
LayerTestResult<float, 4> BatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    // Trailing arguments are quantization scale/offset (unused for Float32)
    // and the data layout.
    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        0.f, 0, armnn::DataLayout::NCHW);
}
8255
// NHWC variant of BatchNormTest: the same values as the NCHW test, permuted
// into channels-last order, with the layout argument set to NHWC.
LayerTestResult<float, 4> BatchNormNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    // Trailing arguments are quantization scale/offset (unused for Float32)
    // and the data layout.
    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        0.f, 0, armnn::DataLayout::NHWC);
}
8300
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008301LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8302 armnn::IWorkloadFactory& workloadFactory,
8303 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008304{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008305 // BatchSize: 1
8306 // Channels: 2
8307 // Height: 3
8308 // Width: 2
8309
8310 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8311 std::vector<float> inputValues
8312 {
8313 // Batch 0, Channel 0, Height (3) x Width (2)
8314 1.f, 4.f,
8315 4.f, 2.f,
8316 1.f, 6.f,
8317
8318 // Batch 0, Channel 1, Height (3) x Width (2)
8319 1.f, 1.f,
8320 4.f, 1.f,
8321 -2.f, 4.f
8322 };
8323 std::vector<float> expectedOutputValues
8324 {
8325 // Batch 0, Channel 0, Height (3) x Width (2)
8326 1.f, 4.f,
8327 4.f, 2.f,
8328 1.f, 6.f,
8329
8330 // Batch 0, Channel 1, Height (3) x Width (2)
8331 3.f, 3.f,
8332 4.f, 3.f,
8333 2.f, 4.f
8334 };
8335
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008336 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8337 workloadFactory, memoryManager,
8338 inputOutputShape, inputValues, expectedOutputValues,
8339 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008340}
8341
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008342LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
8343 armnn::IWorkloadFactory& workloadFactory,
8344 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008345{
8346 // BatchSize: 1
8347 // Height: 3
8348 // Width: 2
8349 // Channels: 2
8350
8351 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8352 std::vector<float> inputValues
8353 {
8354 // Batch 0, Height 0, Width (2) x Channel (2)
8355 1.f, 1.f,
8356 4.f, 1.f,
8357
8358 // Batch 0, Height 1, Width (2) x Channel (2)
8359 4.f, 4.f,
8360 2.f, 1.f,
8361
8362 // Batch 0, Height 2, Width (2) x Channel (2)
8363 1.f, -2.f,
8364 6.f, 4.f
8365 };
8366 std::vector<float> expectedOutputValues
8367 {
8368 // Batch 0, Height 0, Width (2) x Channel (2)
8369 1.f, 3.f,
8370 4.f, 3.f,
8371
8372 // Batch 0, Height 1, Width (2) x Channel (2)
8373 4.f, 4.f,
8374 2.f, 3.f,
8375
8376 // Batch 0, Height 2, Width (2) x Channel (2)
8377 1.f, 2.f,
8378 6.f, 4.f
8379 };
8380
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008381 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
8382 (workloadFactory, memoryManager,
8383 inputOutputShape, inputValues, expectedOutputValues,
8384 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008385}
8386
Matteo Martincighf5507132019-06-04 10:59:47 +01008387LayerTestResult<int16_t, 4> BatchNormInt16Test(
8388 armnn::IWorkloadFactory& workloadFactory,
8389 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8390{
8391 // BatchSize: 1
8392 // Channels: 2
8393 // Height: 3
8394 // Width: 2
8395
8396 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8397 std::vector<float> inputValues
8398 {
8399 // Batch 0, Channel 0, Height (3) x Width (2)
8400 1.f, 4.f,
8401 4.f, 2.f,
8402 1.f, 6.f,
8403
8404 // Batch 0, Channel 1, Height (3) x Width (2)
8405 1.f, 1.f,
8406 4.f, 1.f,
8407 -2.f, 4.f
8408 };
8409 std::vector<float> expectedOutputValues
8410 {
8411 // Batch 0, Channel 0, Height (3) x Width (2)
8412 1.f, 4.f,
8413 4.f, 2.f,
8414 1.f, 6.f,
8415
8416 // Batch 0, Channel 1, Height (3) x Width (2)
8417 3.f, 3.f,
8418 4.f, 3.f,
8419 2.f, 4.f
8420 };
8421
8422 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
8423 workloadFactory, memoryManager,
8424 inputOutputShape, inputValues, expectedOutputValues,
8425 1.f/20.f, 50, armnn::DataLayout::NCHW);
8426}
8427
8428LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
8429 armnn::IWorkloadFactory& workloadFactory,
8430 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8431{
8432 // BatchSize: 1
8433 // Height: 3
8434 // Width: 2
8435 // Channels: 2
8436
8437 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8438 std::vector<float> inputValues
8439 {
8440 // Batch 0, Height 0, Width (2) x Channel (2)
8441 1.f, 1.f,
8442 4.f, 1.f,
8443
8444 // Batch 0, Height 1, Width (2) x Channel (2)
8445 4.f, 4.f,
8446 2.f, 1.f,
8447
8448 // Batch 0, Height 2, Width (2) x Channel (2)
8449 1.f, -2.f,
8450 6.f, 4.f
8451 };
8452 std::vector<float> expectedOutputValues
8453 {
8454 // Batch 0, Height 0, Width (2) x Channel (2)
8455 1.f, 3.f,
8456 4.f, 3.f,
8457
8458 // Batch 0, Height 1, Width (2) x Channel (2)
8459 4.f, 4.f,
8460 2.f, 3.f,
8461
8462 // Batch 0, Height 2, Width (2) x Channel (2)
8463 1.f, 2.f,
8464 6.f, 4.f
8465 };
8466
8467 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
8468 (workloadFactory, memoryManager,
8469 inputOutputShape, inputValues, expectedOutputValues,
8470 1.f/20.f, 50, armnn::DataLayout::NHWC);
8471}
8472
Nina Drozd58ef2c62019-05-16 12:09:18 +01008473LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008474 armnn::IWorkloadFactory& workloadFactory,
8475 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008476{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008477 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008478}
8479
Nina Drozd58ef2c62019-05-16 12:09:18 +01008480LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8481 armnn::IWorkloadFactory& workloadFactory,
8482 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8483{
8484 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8485}
8486
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008487LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8488 armnn::IWorkloadFactory& workloadFactory,
8489 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008490{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008491 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008492}
8493
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008494LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8495 armnn::IWorkloadFactory& workloadFactory,
8496 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008497{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008498 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008499}
8500
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008501LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8502 armnn::IWorkloadFactory& workloadFactory,
8503 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008504{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008505 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008506}
8507
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008508LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
8509 armnn::IWorkloadFactory& workloadFactory,
8510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008511{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008512 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8513 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008514}
8515
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008516LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
8517 armnn::IWorkloadFactory& workloadFactory,
8518 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008519{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008520 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8521 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008522}
8523
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008524LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
8525 armnn::IWorkloadFactory& workloadFactory,
8526 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008527{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008528 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008529}
8530
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008531LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
8532 armnn::IWorkloadFactory& workloadFactory,
8533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008534{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008535 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008536}
8537
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008538LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
8539 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008540 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8541 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00008542{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008543 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8544 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008545}
8546
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008547LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8548 armnn::IWorkloadFactory& workloadFactory,
8549 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008550{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008551 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008552}
8553
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008554LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
8555 armnn::IWorkloadFactory& workloadFactory,
8556 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008557{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008558 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8559 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008560}
8561
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008562LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
8563 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008564 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8565 bool useSubtensor)
8566{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008567 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8568 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008569}
8570
8571LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
8572 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008573 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008574{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008575 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008576}
8577
8578LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
8579 armnn::IWorkloadFactory& workloadFactory,
8580 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8581{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008582 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008583}
8584
8585LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
8586 armnn::IWorkloadFactory& workloadFactory,
8587 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8588{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008589 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008590}
8591
8592LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
8593 armnn::IWorkloadFactory& workloadFactory,
8594 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
8595{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008596 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8597 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00008598}
8599
8600LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
8601 armnn::IWorkloadFactory& workloadFactory,
8602 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8603{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008604 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
8605 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008606}
8607
8608LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
8609 armnn::IWorkloadFactory& workloadFactory,
8610 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8611{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008612 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
8613 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008614}
8615
8616LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
8617 armnn::IWorkloadFactory& workloadFactory,
8618 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8619{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008620 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8621 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008622}
8623
8624LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
8625 armnn::IWorkloadFactory& workloadFactory,
8626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8627 bool useSubtensor)
8628{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008629 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8630 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00008631}
8632
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008633LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
8634 armnn::IWorkloadFactory& workloadFactory,
8635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8636 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008637{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008638 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
8639 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008640}
8641
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008642LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
8643 armnn::IWorkloadFactory& workloadFactory,
8644 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8645 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008646{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008647 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008648 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008649}
8650
Teresa Charlin0434df62019-06-06 13:40:35 +01008651LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
8652 armnn::IWorkloadFactory& workloadFactory,
8653 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8654 bool forceNoPadding)
8655{
8656 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
8657 workloadFactory, memoryManager, forceNoPadding);
8658}
8659
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008660LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
8661 armnn::IWorkloadFactory& workloadFactory,
8662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8663 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008664{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008665 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
8666 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008667}
8668
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008669LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
8670 armnn::IWorkloadFactory& workloadFactory,
8671 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8672 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008673{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008674 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008675 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008676}
8677
Teresa Charlin0434df62019-06-06 13:40:35 +01008678LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
8679 armnn::IWorkloadFactory& workloadFactory,
8680 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8681 bool forceNoPadding)
8682{
8683 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
8684 workloadFactory, memoryManager, forceNoPadding);
8685}
8686
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008687LayerTestResult<float, 4> SimpleMaxPooling2dTest(
8688 armnn::IWorkloadFactory& workloadFactory,
8689 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008690 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008691{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008692 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008693}
8694
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008695LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
8696 armnn::IWorkloadFactory& workloadFactory,
8697 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008698 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01008699{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008700 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01008701}
8702
Teresa Charlin0434df62019-06-06 13:40:35 +01008703LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
8704 armnn::IWorkloadFactory& workloadFactory,
8705 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8706 const armnn::DataLayout dataLayout)
8707{
8708 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
8709}
8710LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
8711 armnn::IWorkloadFactory& workloadFactory,
8712 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8713{
8714 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8715}
8716
8717LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
8718 armnn::IWorkloadFactory& workloadFactory,
8719 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8720{
8721 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8722 workloadFactory, memoryManager, 1.0f, -5);
8723}
8724
8725LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
8726 armnn::IWorkloadFactory& workloadFactory,
8727 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8728{
8729 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8730 workloadFactory, memoryManager);
8731}
8732
8733LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
8734 armnn::IWorkloadFactory& workloadFactory,
8735 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8736{
8737 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8738}
8739
8740LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
8741 armnn::IWorkloadFactory& workloadFactory,
8742 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8743{
8744 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8745 workloadFactory, memoryManager, 1.0f, -5);
8746}
8747
8748LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
8749 armnn::IWorkloadFactory& workloadFactory,
8750 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8751{
8752 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
8753 workloadFactory, memoryManager);
8754}
8755
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008756LayerTestResult<float, 4> SimpleAveragePooling2dTest(
8757 armnn::IWorkloadFactory& workloadFactory,
8758 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008759 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008760{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008761 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01008762}
8763
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008764LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
8765 armnn::IWorkloadFactory& workloadFactory,
8766 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008767 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01008768{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008769 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008770 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008771}
8772
Teresa Charlin0434df62019-06-06 13:40:35 +01008773LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
8774 armnn::IWorkloadFactory& workloadFactory,
8775 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8776 const armnn::DataLayout dataLayout)
8777{
8778 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8779 workloadFactory, memoryManager, dataLayout);
8780}
8781
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008782LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
8783 armnn::IWorkloadFactory& workloadFactory,
8784 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8785 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01008786{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008787 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008788 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01008789}
8790
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008791LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
8792 armnn::IWorkloadFactory& workloadFactory,
8793 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008794{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008795 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008796}
8797
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008798LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
8799 armnn::IWorkloadFactory& workloadFactory,
8800 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008801{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008802 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8803 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008804}
8805
Teresa Charlin0434df62019-06-06 13:40:35 +01008806LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
8807 armnn::IWorkloadFactory& workloadFactory,
8808 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8809{
8810 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8811 workloadFactory, memoryManager);
8812}
8813LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
8814 armnn::IWorkloadFactory& workloadFactory,
8815 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8816{
8817 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8818}
8819
8820LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
8821 armnn::IWorkloadFactory& workloadFactory,
8822 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8823{
8824 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8825 workloadFactory, memoryManager);
8826}
8827
8828LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
8829 armnn::IWorkloadFactory& workloadFactory,
8830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8831{
8832 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8833 workloadFactory, memoryManager);
8834}
8835
8836LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
8837 armnn::IWorkloadFactory& workloadFactory,
8838 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8839{
8840 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
8841 workloadFactory, memoryManager);
8842}
8843
8844LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
8845 armnn::IWorkloadFactory& workloadFactory,
8846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8847{
8848 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
8849 workloadFactory, memoryManager);
8850}
8851
8852LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
8853 armnn::IWorkloadFactory& workloadFactory,
8854 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8855{
8856 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
8857 workloadFactory, memoryManager);
8858}
8859
8860LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
8861 armnn::IWorkloadFactory& workloadFactory,
8862 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8863{
8864 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8865}
8866
8867LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
8868 armnn::IWorkloadFactory& workloadFactory,
8869 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8870{
8871 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8872 workloadFactory, memoryManager);
8873}
8874
8875LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
8876 armnn::IWorkloadFactory& workloadFactory,
8877 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8878{
8879 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
8880 workloadFactory, memoryManager);
8881}
8882
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008883LayerTestResult<float, 4> SimpleL2Pooling2dTest(
8884 armnn::IWorkloadFactory& workloadFactory,
8885 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008886 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008887{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008888 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008889}
8890
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008891LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
8892 armnn::IWorkloadFactory& workloadFactory,
8893 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008894 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008895{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008896 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008897}
8898
Teresa Charlin0434df62019-06-06 13:40:35 +01008899LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
8900 armnn::IWorkloadFactory& workloadFactory,
8901 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8902 const armnn::DataLayout dataLayout)
8903{
8904 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
8905}
8906
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008907LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
8908 armnn::IWorkloadFactory& workloadFactory,
8909 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008910{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008911 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008912}
8913
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008914LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
8915 armnn::IWorkloadFactory& workloadFactory,
8916 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008917{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008918 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008919}
8920
Teresa Charlin0434df62019-06-06 13:40:35 +01008921LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
8922 armnn::IWorkloadFactory& workloadFactory,
8923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8924{
8925 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8926}
8927
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008928LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
8929 armnn::IWorkloadFactory& workloadFactory,
8930 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008931{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008932 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008933}
8934
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008935LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
8936 armnn::IWorkloadFactory& workloadFactory,
8937 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008938{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008939 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008940}
8941
Teresa Charlin0434df62019-06-06 13:40:35 +01008942LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
8943 armnn::IWorkloadFactory& workloadFactory,
8944 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8945{
8946 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8947}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008948LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
8949 armnn::IWorkloadFactory& workloadFactory,
8950 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008951{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008952 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008953}
8954
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008955LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
8956 armnn::IWorkloadFactory& workloadFactory,
8957 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008958{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008959 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008960}
8961
Teresa Charlin0434df62019-06-06 13:40:35 +01008962LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
8963 armnn::IWorkloadFactory& workloadFactory,
8964 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8965{
8966 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8967}
8968
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008969LayerTestResult<float, 4> L2Pooling2dSize7Test(
8970 armnn::IWorkloadFactory& workloadFactory,
8971 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008972{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008973 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008974}
8975
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008976LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
8977 armnn::IWorkloadFactory& workloadFactory,
8978 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008979{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008980 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008981}
8982
Teresa Charlin0434df62019-06-06 13:40:35 +01008983LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
8984 armnn::IWorkloadFactory& workloadFactory,
8985 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8986{
8987 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8988}
8989
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008990LayerTestResult<float, 4> L2Pooling2dSize9Test(
8991 armnn::IWorkloadFactory& workloadFactory,
8992 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008993{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008994 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008995}
8996
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008997LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
8998 armnn::IWorkloadFactory& workloadFactory,
8999 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009000{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009001 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009002}
9003
Teresa Charlin0434df62019-06-06 13:40:35 +01009004LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
9005 armnn::IWorkloadFactory& workloadFactory,
9006 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9007{
9008 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9009}
9010LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
9011 armnn::IWorkloadFactory& workloadFactory,
9012 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9013{
9014 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9015}
9016
9017LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
9018 armnn::IWorkloadFactory& workloadFactory,
9019 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9020{
9021 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9022}
9023
9024LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
9025 armnn::IWorkloadFactory& workloadFactory,
9026 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9027{
9028 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9029}
9030
9031LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
9032 armnn::IWorkloadFactory& workloadFactory,
9033 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9034{
9035 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9036}
9037
9038LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
9039 armnn::IWorkloadFactory& workloadFactory,
9040 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9041{
9042 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9043}
9044
9045LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
9046 armnn::IWorkloadFactory& workloadFactory,
9047 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9048{
9049 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9050}
9051
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009052LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
9053 armnn::IWorkloadFactory& workloadFactory,
9054 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009055{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009056 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009057}
9058
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009059LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
9060 armnn::IWorkloadFactory& workloadFactory,
9061 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009062{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009063 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009064}
9065
Teresa Charlin0434df62019-06-06 13:40:35 +01009066LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
9067 armnn::IWorkloadFactory& workloadFactory,
9068 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9069{
9070 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9071}
9072
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009073LayerTestResult<float, 4> ComparePooling2dTest(
9074 armnn::IWorkloadFactory& workloadFactory,
9075 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9076 armnn::IWorkloadFactory& refWorkloadFactory,
9077 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00009078{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009079 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009080 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00009081}
9082
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009083LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
9084 armnn::IWorkloadFactory& workloadFactory,
9085 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9086 armnn::IWorkloadFactory& refWorkloadFactory,
9087 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00009088{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009089 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009090 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00009091}
9092
Teresa Charlin0434df62019-06-06 13:40:35 +01009093LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
9094 armnn::IWorkloadFactory& workloadFactory,
9095 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9096 armnn::IWorkloadFactory& refWorkloadFactory,
9097 armnn::PoolingAlgorithm poolingType)
9098{
9099 return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9100 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
9101}
9102
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009103LayerTestResult<float, 2> FullyConnectedLargeTest(
9104 armnn::IWorkloadFactory& workloadFactory,
9105 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9106 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00009107{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009108 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00009109}
9110
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009111LayerTestResult<float, 4> SimplePermuteFloat32Test(
9112 armnn::IWorkloadFactory& workloadFactory,
9113 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009114{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009115 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009116};
9117
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009118LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
9119 armnn::IWorkloadFactory& workloadFactory,
9120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009121{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009122 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009123};
surmeh01bceff2f2018-03-29 16:29:27 +01009124
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009125LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
9126 armnn::IWorkloadFactory& workloadFactory,
9127 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01009128{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009129 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01009130};
9131
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009132LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
9133 armnn::IWorkloadFactory& workloadFactory,
9134 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01009135{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009136 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01009137};
9138
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009139LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
9140 armnn::IWorkloadFactory& workloadFactory,
9141 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01009142{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009143 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01009144};
9145
// Chains two workloads through a shared tensor handle: a 1x1 MaxPool with
// stride 2x2 over a 3x3 input, whose output handle is wired directly as an
// input of an Addition workload. Returns the Addition's actual output
// alongside the hand-computed expected values.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                                    {12, 16,
                                                                     24, 28,
                                                                    });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): this reads poolingOutputHandle *before* the pooling
    // workload has executed, so resultMaxPool holds unspecified data, which is
    // then written straight back on the next line. The round-trip looks
    // redundant - the handle is already wired into the addition workload and
    // workload->Execute() below overwrites its contents. Confirm backend
    // handle semantics before removing.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Execution order matters: the MaxPool must run before the Addition that
    // consumes its output handle.
    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009250
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009251LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
9252 armnn::IWorkloadFactory& workloadFactory,
9253 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009254{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009255 return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009256}
9257
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009258LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
9259 armnn::IWorkloadFactory& workloadFactory,
9260 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009261{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009262 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009263}
9264
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009265LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
9266 armnn::IWorkloadFactory& workloadFactory,
9267 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009268{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009269 return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009270}
9271
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009272LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
9273 armnn::IWorkloadFactory& workloadFactory,
9274 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009275{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009276 return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009277}
9278
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009279LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
9280 armnn::IWorkloadFactory& workloadFactory,
9281 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009282{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009283 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009284}
9285
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009286LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
9287 armnn::IWorkloadFactory& workloadFactory,
9288 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009289{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009290 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009291}
9292
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009293LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
9294 armnn::IWorkloadFactory& workloadFactory,
9295 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009296{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009297 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009298}
9299
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009300LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
9301 armnn::IWorkloadFactory& workloadFactory,
9302 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009303{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009304 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009305}
9306
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009307LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
9308 armnn::IWorkloadFactory& workloadFactory,
9309 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009310{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009311 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009312}
9313
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009314LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
9315 armnn::IWorkloadFactory& workloadFactory,
9316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009317{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009318 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009319}
9320
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009321LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
9322 armnn::IWorkloadFactory& workloadFactory,
9323 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009324{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009325 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009326}
9327
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009328LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
9329 armnn::IWorkloadFactory& workloadFactory,
9330 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009331{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009332 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009333}
9334
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009335LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
9336 armnn::IWorkloadFactory& workloadFactory,
9337 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009338{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009339 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009340}
9341
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009342LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
9343 armnn::IWorkloadFactory& workloadFactory,
9344 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009345{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009346 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009347}
9348
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009349LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
9350 armnn::IWorkloadFactory& workloadFactory,
9351 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009352{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009353 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009354}
9355
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009356LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
9357 armnn::IWorkloadFactory& workloadFactory,
9358 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009359{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009360 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009361}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009362
nikraj01120522a2019-05-31 11:33:07 +01009363LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
9364 armnn::IWorkloadFactory& workloadFactory,
9365 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9366{
9367 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9368}
9369
9370LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
9371 armnn::IWorkloadFactory& workloadFactory,
9372 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9373{
9374 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9375}
9376
9377LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
9378 armnn::IWorkloadFactory& workloadFactory,
9379 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9380{
9381 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9382}
9383
9384LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
9385 armnn::IWorkloadFactory& workloadFactory,
9386 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9387{
9388 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9389}
9390
9391LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
9392 armnn::IWorkloadFactory& workloadFactory,
9393 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9394{
9395 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9396}
9397
9398LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
9399 armnn::IWorkloadFactory& workloadFactory,
9400 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9401{
9402 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9403}
9404
9405LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
9406 armnn::IWorkloadFactory& workloadFactory,
9407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9408{
9409 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9410}
9411
9412LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
9413 armnn::IWorkloadFactory& workloadFactory,
9414 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9415{
9416 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9417}
9418
Keith Davisa57eccb2019-06-14 17:33:22 +01009419
9420LayerTestResult<uint8_t, 4> SpaceToDepthNHWCAsymmQ8Test(
9421 armnn::IWorkloadFactory& workloadFactory,
9422 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9423{
James Conroyd2aa85e2019-07-01 17:12:40 +01009424 return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009425 workloadFactory,
9426 memoryManager);
9427}
9428
9429LayerTestResult<uint8_t, 4> SpaceToDepthNCHWAsymmQ8Test(
9430 armnn::IWorkloadFactory& workloadFactory,
9431 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9432{
James Conroyd2aa85e2019-07-01 17:12:40 +01009433 return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009434 workloadFactory,
9435 memoryManager,
9436 armnn::DataLayout::NCHW);
9437}
9438
James Conroyd2aa85e2019-07-01 17:12:40 +01009439LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test1(
Keith Davisa57eccb2019-06-14 17:33:22 +01009440 armnn::IWorkloadFactory& workloadFactory,
9441 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9442{
James Conroyd2aa85e2019-07-01 17:12:40 +01009443 return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009444 workloadFactory,
9445 memoryManager);
9446}
9447
James Conroyd2aa85e2019-07-01 17:12:40 +01009448LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test1(
Keith Davisa57eccb2019-06-14 17:33:22 +01009449 armnn::IWorkloadFactory& workloadFactory,
9450 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9451{
James Conroyd2aa85e2019-07-01 17:12:40 +01009452 return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
9453 workloadFactory,
9454 memoryManager,
9455 armnn::DataLayout::NCHW);
9456}
9457
9458LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test2(
9459 armnn::IWorkloadFactory& workloadFactory,
9460 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9461{
9462 return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
9463 workloadFactory,
9464 memoryManager);
9465}
9466
9467LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test2(
9468 armnn::IWorkloadFactory& workloadFactory,
9469 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9470{
9471 return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
9472 workloadFactory,
9473 memoryManager,
9474 armnn::DataLayout::NCHW);
9475}
9476
9477LayerTestResult<int16_t, 4> SpaceToDepthNHWCQSymm16Test(
9478 armnn::IWorkloadFactory& workloadFactory,
9479 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9480{
9481 return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
9482 workloadFactory,
9483 memoryManager);
9484}
9485
9486LayerTestResult<int16_t, 4> SpaceToDepthNCHWQSymm16Test(
9487 armnn::IWorkloadFactory& workloadFactory,
9488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9489{
9490 return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009491 workloadFactory,
9492 memoryManager,
9493 armnn::DataLayout::NCHW);
9494}
9495
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009496namespace {
9497
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009498} // anonymous namespace
9499
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009500LayerTestResult<float, 4> StridedSlice4DFloat32Test(
9501 armnn::IWorkloadFactory& workloadFactory,
9502 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9503{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009504 return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009505}
9506
9507LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
9508 armnn::IWorkloadFactory& workloadFactory,
9509 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9510{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009511 return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009512}
9513
9514LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
9515 armnn::IWorkloadFactory& workloadFactory,
9516 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9517{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009518 return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009519}
9520
9521LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
9522 armnn::IWorkloadFactory& workloadFactory,
9523 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9524{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009525 return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009526}
9527
9528LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
9529 armnn::IWorkloadFactory& workloadFactory,
9530 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9531{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009532 return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009533}
9534
9535LayerTestResult<float, 3> StridedSlice3DFloat32Test(
9536 armnn::IWorkloadFactory& workloadFactory,
9537 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9538{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009539 return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009540}
9541
9542LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
9543 armnn::IWorkloadFactory& workloadFactory,
9544 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9545{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009546 return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009547}
9548
9549LayerTestResult<float, 2> StridedSlice2DFloat32Test(
9550 armnn::IWorkloadFactory& workloadFactory,
9551 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9552{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009553 return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009554}
9555
9556LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
9557 armnn::IWorkloadFactory& workloadFactory,
9558 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9559{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009560 return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009561}
9562
9563LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
9564 armnn::IWorkloadFactory& workloadFactory,
9565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9566{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009567 return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009568}
9569
9570LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
9571 armnn::IWorkloadFactory& workloadFactory,
9572 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9573{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009574 return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009575}
9576
9577LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
9578 armnn::IWorkloadFactory& workloadFactory,
9579 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9580{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009581 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009582}
9583
9584LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
9585 armnn::IWorkloadFactory& workloadFactory,
9586 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9587{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009588 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009589}
9590
9591LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
9592 armnn::IWorkloadFactory& workloadFactory,
9593 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9594{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009595 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009596}
9597
9598LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
9599 armnn::IWorkloadFactory& workloadFactory,
9600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9601{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009602 return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009603}
9604
9605LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
9606 armnn::IWorkloadFactory& workloadFactory,
9607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9608{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009609 return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009610}
9611
9612LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
9613 armnn::IWorkloadFactory& workloadFactory,
9614 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9615{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009616 return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009617}
9618
9619LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
9620 armnn::IWorkloadFactory& workloadFactory,
9621 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9622{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009623 return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009624}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009625
Matteo Martincigh42666a12019-05-29 08:53:41 +01009626LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
9627 armnn::IWorkloadFactory& workloadFactory,
9628 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9629{
9630 return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9631}
9632
9633LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
9634 armnn::IWorkloadFactory& workloadFactory,
9635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9636{
9637 return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9638}
9639
9640LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
9641 armnn::IWorkloadFactory& workloadFactory,
9642 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9643{
9644 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9645}
9646
9647LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
9648 armnn::IWorkloadFactory& workloadFactory,
9649 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9650{
9651 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9652}
9653
9654LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
9655 armnn::IWorkloadFactory& workloadFactory,
9656 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9657{
9658 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9659}
9660
9661LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
9662 armnn::IWorkloadFactory& workloadFactory,
9663 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9664{
9665 return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9666}
9667
9668LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
9669 armnn::IWorkloadFactory& workloadFactory,
9670 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9671{
9672 return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9673}
9674
9675LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
9676 armnn::IWorkloadFactory& workloadFactory,
9677 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9678{
9679 return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9680}
9681
9682LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
9683 armnn::IWorkloadFactory& workloadFactory,
9684 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9685{
9686 return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9687}
9688
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009689LayerTestResult<float, 4> Debug4DFloat32Test(
9690 armnn::IWorkloadFactory& workloadFactory,
9691 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9692{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009693 return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009694}
9695
9696LayerTestResult<float, 3> Debug3DFloat32Test(
9697 armnn::IWorkloadFactory& workloadFactory,
9698 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9699{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009700 return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009701}
9702
9703LayerTestResult<float, 2> Debug2DFloat32Test(
9704 armnn::IWorkloadFactory& workloadFactory,
9705 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9706{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009707 return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009708}
9709
9710LayerTestResult<float, 1> Debug1DFloat32Test(
9711 armnn::IWorkloadFactory& workloadFactory,
9712 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9713{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009714 return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009715}
9716
9717LayerTestResult<uint8_t, 4> Debug4DUint8Test(
9718 armnn::IWorkloadFactory& workloadFactory,
9719 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9720{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009721 return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009722}
9723
9724LayerTestResult<uint8_t, 3> Debug3DUint8Test(
9725 armnn::IWorkloadFactory& workloadFactory,
9726 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9727{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009728 return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009729}
9730
9731LayerTestResult<uint8_t, 2> Debug2DUint8Test(
9732 armnn::IWorkloadFactory& workloadFactory,
9733 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9734{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009735 return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009736}
9737
9738LayerTestResult<uint8_t, 1> Debug1DUint8Test(
9739 armnn::IWorkloadFactory& workloadFactory,
9740 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9741{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009742 return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009743}
Matteo Martincigh49124022019-01-11 13:25:59 +00009744
narpra014951d842019-01-18 16:53:53 +00009745LayerTestResult<float, 1> Gather1DParamsFloatTest(
9746 armnn::IWorkloadFactory& workloadFactory,
9747 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9748{
9749 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9750}
9751
9752LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
9753 armnn::IWorkloadFactory& workloadFactory,
9754 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9755{
9756 return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9757}
9758
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009759LayerTestResult<int16_t, 1> Gather1DParamsInt16Test(
9760 armnn::IWorkloadFactory& workloadFactory,
9761 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9762{
9763 return Gather1DParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9764}
9765
narpra014951d842019-01-18 16:53:53 +00009766LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
9767 armnn::IWorkloadFactory& workloadFactory,
9768 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9769{
9770 return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9771}
9772
9773LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
9774 armnn::IWorkloadFactory& workloadFactory,
9775 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9776{
9777 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9778}
9779
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009780LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
9781 armnn::IWorkloadFactory& workloadFactory,
9782 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9783{
9784 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9785}
9786
narpra014951d842019-01-18 16:53:53 +00009787LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
9788 armnn::IWorkloadFactory& workloadFactory,
9789 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9790{
9791 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9792}
9793
9794LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
9795 armnn::IWorkloadFactory& workloadFactory,
9796 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9797{
9798 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
9799 workloadFactory, memoryManager);
Matteo Martincigh3d6898c2019-01-15 16:11:44 +00009800}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009801
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009802LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
9803 armnn::IWorkloadFactory& workloadFactory,
9804 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9805{
9806 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedSymm16>(
9807 workloadFactory, memoryManager);
9808}
9809
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009810LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009811 armnn::IWorkloadFactory& workloadFactory,
9812 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9813{
9814 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9815}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009816
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009817LayerTestResult<float, 4> DequantizeOffsetUint8Test(
9818 armnn::IWorkloadFactory& workloadFactory,
9819 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9820{
9821 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9822}
9823
9824LayerTestResult<float, 4> DequantizeSimpleInt16Test(
9825 armnn::IWorkloadFactory& workloadFactory,
9826 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9827{
9828 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9829}
9830
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009831LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
9832 armnn::IWorkloadFactory& workloadFactory,
9833 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9834{
9835 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9836}
9837
9838LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
9839 armnn::IWorkloadFactory& workloadFactory,
9840 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9841{
9842 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9843}
9844
9845LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
9846 armnn::IWorkloadFactory& workloadFactory,
9847 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9848{
9849 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9850}
Aron Virginas-Tar735a4502019-06-26 15:02:47 +01009851
9852//
9853// TransposeConvolution2d
9854//
9855
9856// Simple biased
9857LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNchwTest(
9858 armnn::IWorkloadFactory& workloadFactory,
9859 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9860{
9861 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9862 workloadFactory,
9863 memoryManager,
9864 true,
9865 armnn::DataLayout::NCHW);
9866}
9867
9868LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNhwcTest(
9869 armnn::IWorkloadFactory& workloadFactory,
9870 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9871{
9872 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9873 workloadFactory,
9874 memoryManager,
9875 true,
9876 armnn::DataLayout::NHWC);
9877}
9878
9879LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NchwTest(
9880 armnn::IWorkloadFactory& workloadFactory,
9881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9882{
9883 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9884 workloadFactory,
9885 memoryManager,
9886 true,
9887 armnn::DataLayout::NCHW);
9888}
9889
9890LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NhwcTest(
9891 armnn::IWorkloadFactory& workloadFactory,
9892 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9893{
9894 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9895 workloadFactory,
9896 memoryManager,
9897 true,
9898 armnn::DataLayout::NHWC);
9899}
9900
9901LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NchwTest(
9902 armnn::IWorkloadFactory& workloadFactory,
9903 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9904{
9905 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9906 workloadFactory,
9907 memoryManager,
9908 true,
9909 armnn::DataLayout::NCHW);
9910}
9911
9912LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NhwcTest(
9913 armnn::IWorkloadFactory& workloadFactory,
9914 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9915{
9916 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9917 workloadFactory,
9918 memoryManager,
9919 true,
9920 armnn::DataLayout::NHWC);
9921}
9922
9923// Simple unbiased
9924LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNchwTest(
9925 armnn::IWorkloadFactory& workloadFactory,
9926 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9927{
9928 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9929 workloadFactory,
9930 memoryManager,
9931 false,
9932 armnn::DataLayout::NCHW);
9933}
9934
9935LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNhwcTest(
9936 armnn::IWorkloadFactory& workloadFactory,
9937 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9938{
9939 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9940 workloadFactory,
9941 memoryManager,
9942 false,
9943 armnn::DataLayout::NHWC);
9944}
9945
9946LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NchwTest(
9947 armnn::IWorkloadFactory& workloadFactory,
9948 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9949{
9950 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9951 workloadFactory,
9952 memoryManager,
9953 false,
9954 armnn::DataLayout::NCHW);
9955}
9956
9957LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NhwcTest(
9958 armnn::IWorkloadFactory& workloadFactory,
9959 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9960{
9961 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9962 workloadFactory,
9963 memoryManager,
9964 false,
9965 armnn::DataLayout::NHWC);
9966}
9967
9968LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NchwTest(
9969 armnn::IWorkloadFactory& workloadFactory,
9970 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9971{
9972 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9973 workloadFactory,
9974 memoryManager,
9975 false,
9976 armnn::DataLayout::NCHW);
9977}
9978
9979LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NhwcTest(
9980 armnn::IWorkloadFactory& workloadFactory,
9981 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9982{
9983 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9984 workloadFactory,
9985 memoryManager,
9986 false,
9987 armnn::DataLayout::NHWC);
9988}
9989
9990// Padded biased
9991LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNchwTest(
9992 armnn::IWorkloadFactory& workloadFactory,
9993 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9994{
9995 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9996 workloadFactory,
9997 memoryManager,
9998 true,
9999 armnn::DataLayout::NCHW);
10000}
10001
10002LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNhwcTest(
10003 armnn::IWorkloadFactory& workloadFactory,
10004 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10005{
10006 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10007 workloadFactory,
10008 memoryManager,
10009 true,
10010 armnn::DataLayout::NHWC);
10011}
10012
10013LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NchwTest(
10014 armnn::IWorkloadFactory& workloadFactory,
10015 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10016{
10017 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10018 workloadFactory,
10019 memoryManager,
10020 true,
10021 armnn::DataLayout::NCHW);
10022}
10023
10024LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NhwcTest(
10025 armnn::IWorkloadFactory& workloadFactory,
10026 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10027{
10028 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10029 workloadFactory,
10030 memoryManager,
10031 true,
10032 armnn::DataLayout::NHWC);
10033}
10034
10035LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NchwTest(
10036 armnn::IWorkloadFactory& workloadFactory,
10037 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10038{
10039 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10040 workloadFactory,
10041 memoryManager,
10042 true,
10043 armnn::DataLayout::NCHW);
10044}
10045
10046LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NhwcTest(
10047 armnn::IWorkloadFactory& workloadFactory,
10048 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10049{
10050 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10051 workloadFactory,
10052 memoryManager,
10053 true,
10054 armnn::DataLayout::NHWC);
10055}
10056
10057// Padded unbiased
10058LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNchwTest(
10059 armnn::IWorkloadFactory& workloadFactory,
10060 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10061{
10062 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10063 workloadFactory,
10064 memoryManager,
10065 false,
10066 armnn::DataLayout::NCHW);
10067}
10068
10069LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNhwcTest(
10070 armnn::IWorkloadFactory& workloadFactory,
10071 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10072{
10073 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10074 workloadFactory,
10075 memoryManager,
10076 false,
10077 armnn::DataLayout::NHWC);
10078}
10079
10080LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NchwTest(
10081 armnn::IWorkloadFactory& workloadFactory,
10082 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10083{
10084 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10085 workloadFactory,
10086 memoryManager,
10087 false,
10088 armnn::DataLayout::NCHW);
10089}
10090
10091LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NhwcTest(
10092 armnn::IWorkloadFactory& workloadFactory,
10093 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10094{
10095 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10096 workloadFactory,
10097 memoryManager,
10098 false,
10099 armnn::DataLayout::NHWC);
10100}
10101
10102LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NchwTest(
10103 armnn::IWorkloadFactory& workloadFactory,
10104 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10105{
10106 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10107 workloadFactory,
10108 memoryManager,
10109 false,
10110 armnn::DataLayout::NCHW);
10111}
10112
10113LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NhwcTest(
10114 armnn::IWorkloadFactory& workloadFactory,
10115 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10116{
10117 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10118 workloadFactory,
10119 memoryManager,
10120 false,
10121 armnn::DataLayout::NHWC);
10122}
10123
10124// Strided biased
10125LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNchwTest(
10126 armnn::IWorkloadFactory& workloadFactory,
10127 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10128{
10129 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10130 workloadFactory,
10131 memoryManager,
10132 true,
10133 armnn::DataLayout::NCHW);
10134}
10135
10136LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNhwcTest(
10137 armnn::IWorkloadFactory& workloadFactory,
10138 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10139{
10140 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10141 workloadFactory,
10142 memoryManager,
10143 true,
10144 armnn::DataLayout::NHWC);
10145}
10146
10147LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NchwTest(
10148 armnn::IWorkloadFactory& workloadFactory,
10149 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10150{
10151 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10152 workloadFactory,
10153 memoryManager,
10154 true,
10155 armnn::DataLayout::NCHW);
10156}
10157
10158LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NhwcTest(
10159 armnn::IWorkloadFactory& workloadFactory,
10160 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10161{
10162 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10163 workloadFactory,
10164 memoryManager,
10165 true,
10166 armnn::DataLayout::NHWC);
10167}
10168
10169LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NchwTest(
10170 armnn::IWorkloadFactory& workloadFactory,
10171 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10172{
10173 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10174 workloadFactory,
10175 memoryManager,
10176 true,
10177 armnn::DataLayout::NCHW);
10178}
10179
10180LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NhwcTest(
10181 armnn::IWorkloadFactory& workloadFactory,
10182 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10183{
10184 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10185 workloadFactory,
10186 memoryManager,
10187 true,
10188 armnn::DataLayout::NHWC);
10189}
10190
10191// Strided unbiased
10192LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNchwTest(
10193 armnn::IWorkloadFactory& workloadFactory,
10194 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10195{
10196 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10197 workloadFactory,
10198 memoryManager,
10199 false,
10200 armnn::DataLayout::NCHW);
10201}
10202
10203LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNhwcTest(
10204 armnn::IWorkloadFactory& workloadFactory,
10205 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10206{
10207 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10208 workloadFactory,
10209 memoryManager,
10210 false,
10211 armnn::DataLayout::NHWC);
10212}
10213
10214LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NchwTest(
10215 armnn::IWorkloadFactory& workloadFactory,
10216 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10217{
10218 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10219 workloadFactory,
10220 memoryManager,
10221 false,
10222 armnn::DataLayout::NCHW);
10223}
10224
10225LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NhwcTest(
10226 armnn::IWorkloadFactory& workloadFactory,
10227 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10228{
10229 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10230 workloadFactory,
10231 memoryManager,
10232 false,
10233 armnn::DataLayout::NHWC);
10234}
10235
10236LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NchwTest(
10237 armnn::IWorkloadFactory& workloadFactory,
10238 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10239{
10240 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10241 workloadFactory,
10242 memoryManager,
10243 false,
10244 armnn::DataLayout::NCHW);
10245}
10246
10247LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NhwcTest(
10248 armnn::IWorkloadFactory& workloadFactory,
10249 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10250{
10251 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10252 workloadFactory,
10253 memoryManager,
10254 false,
10255 armnn::DataLayout::NHWC);
10256}