blob: ca39438fbf5a59012811460eacb384b2a14200af [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000033#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000034#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000035#include "SpaceToBatchNdTestImpl.hpp"
Keith Davisa57eccb2019-06-14 17:33:22 +010036#include "SpaceToDepthTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
41#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010042#include "LstmTestImpl.hpp"
43#include "ConvertFp16ToFp32TestImpl.hpp"
44#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000045#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000046#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010047#include "QuantizeTestImpl.hpp"
Aron Virginas-Tar735a4502019-06-26 15:02:47 +010048#include "TransposeConvolution2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000049
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Channel 0: rows of 0.5f with an all-zero second row; channel 1: a single
// vertical line of 1s in column 2; channel 2: constant -1 everywhere.
static std::vector<float> ConvInput3x8x16({
    // Channel 0.
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    // Channel 1.
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    // Channel 2.
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
77
// 2-channel bias used by a number of Conv2d tests (one value per output channel).
static std::vector<float> Bias2({0, 2});
80
telsoa01c577f2c2018-08-31 09:22:23 +010081// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000082template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +010083boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +000084{
85 if(biasEnabled)
86 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000087 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +010088 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +000089 return bias;
90 }
91 else
92 {
93 return boost::multi_array<T, 1>();
94 }
95}
96
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000097template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000098LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
99 armnn::IWorkloadFactory& workloadFactory,
100 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
101 float qScale,
102 int32_t qOffset,
103 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000104 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000105{
telsoa01c577f2c2018-08-31 09:22:23 +0100106 // Use common single-batch 3-channel 16x8 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000107 armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000108 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
109
telsoa01c577f2c2018-08-31 09:22:23 +0100110 // Use a 2-element batch with 3-channel 3x5 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000111 armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000112 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
113 QuantizedVector<T>(qScale, qOffset, {
114 1, 1, 1,
115 1, -1, 1,
116 1, 1, 1,
117 1, 1, 1,
118 1, 1, 1,
119
120 0, 0, 0,
121 0, 0, 0,
122 0, 0, 0,
123 0, 0, 0,
124 0, 0, 0,
125
126 2, 2, 2,
127 2, 2, 2,
128 2, 2, 2,
129 2, 2, 2,
130 2, 2, 2,
131
132
133 0, 0, 0,
134 0, 0, 0,
135 0, 0, 0,
136 0, 0, 0,
137 0, 0, 0,
138
139 1, 1, 1,
140 1, 1, 1,
141 1, 1, 1,
142 1, 1, 1,
143 1, 1, 1,
144
145 0, 0, 0,
146 0, 0, 0,
147 0, 0, 0,
148 0, 0, 0,
149 0, 0, 0
150 })));
151
telsoa01c577f2c2018-08-31 09:22:23 +0100152 // Expected output is 2 batch elements of a 1-channel 14x4 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000153 armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000154 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
155 QuantizedVector<T>(qScale, qOffset, {
156 -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
157 -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
158 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
159 -23.5f, -23.5f, -23.5f,
160 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
161 -23.5f, -23.5f, -23.5f,
162
163 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
164 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
165 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
166 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
167 })));
168
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000169 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
170 workloadFactory,
171 memoryManager,
172 input,
173 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100174 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000175 expectedOutput,
176 qScale,
177 qOffset,
178 layout);
telsoa014fcda012018-03-09 14:13:49 +0000179}
180
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000181template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
182 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000183LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
184 armnn::IWorkloadFactory& workloadFactory,
185 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
186 float qScale,
187 int32_t qOffset,
188 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000189 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000190{
telsoa01c577f2c2018-08-31 09:22:23 +0100191 // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
telsoa014fcda012018-03-09 14:13:49 +0000192
telsoa01c577f2c2018-08-31 09:22:23 +0100193 // Use common single-batch 3-channel 16x8 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000194 armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000195 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
196
telsoa01c577f2c2018-08-31 09:22:23 +0100197 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000198 armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000199 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
200 QuantizedVector<T>(qScale, qOffset, {
201 1, 1, 1,
202 1, -1, 1,
203 1, 1, 1,
204
205 0, 0, 0,
206 0, 0, 0,
207 0, 0, 0,
208
209 2, 2, 2,
210 2, 2, 2,
211 2, 2, 2,
212
213
214 0, 0, 0,
215 0, 0, 0,
216 0, 0, 0,
217
218 1, 1, 1,
219 1, 1, 1,
220 1, 1, 1,
221
222 0, 0, 0,
223 0, 0, 0,
224 0, 0, 0
225 })));
226
telsoa01c577f2c2018-08-31 09:22:23 +0100227 // Expected output is 1 batch of a 2-channel 14x6 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000228 armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000229 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
230 QuantizedVector<T>(qScale, qOffset, {
231 -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
232 -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
233 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
234 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
235 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
236 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
237
238 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
239 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
240 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
241 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
242 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
243 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
244 })));
245
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000246 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
247 workloadFactory,
248 memoryManager,
249 input,
250 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100251 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000252 expectedOutput,
253 qScale,
254 qOffset,
255 layout);
telsoa014fcda012018-03-09 14:13:49 +0000256}
257
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000258template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000259LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
260 armnn::IWorkloadFactory& workloadFactory,
261 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
262 float qScale,
263 int32_t qOffset,
264 bool biasEnabled,
265 armnn::DataLayout dataLayout)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100266{
267 // Use common single-batch 5x5 image.
268
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000269 armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100270 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
271 {
272 1, 5, 2, 3,
273 8, 7, 3, 6,
274 3, 3, 9, 1
275 });
276
277
278 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000279 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100280 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
281 4, 5, 6,
282 0, 0, 0,
283 3, 2, 1
284 });
285
286 // Expected output is 1 batch of a 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000287 armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100288
289 const std::vector<float> outputData =
290 {
291 23, 41, 33, 21,
292 44, 65, 76, 52,
293 82, 85, 79, 42
294 };
295
296 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
297
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000298 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
299 workloadFactory,
300 memoryManager,
301 input,
302 kernel,
303 boost::multi_array<T, 1>(),
304 expectedOutput,
305 dataLayout,
306 qScale,
307 qOffset);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100308}
309
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000310template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly7332ed82018-12-20 17:03:06 +0000311LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
312 armnn::IWorkloadFactory& workloadFactory,
313 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
314 float qScale,
315 int32_t qOffset,
316 bool biasEnabled,
317 const armnn::DataLayout& dataLayout)
318{
319 // Input is a single-batch, 1 channel, 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000320 armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000321 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
322 {
323 1, 5, 2, 3, 5,
324 8, 7, 3, 6, 3,
325 3, 3, 9, 1, 9,
326 4, 1, 8, 1, 3,
327 6, 8, 1, 9, 2
328 });
329
330 // Use a 3x3 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000331 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000332 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
333 {
334 4, 5, 6,
335 0, 0, 0,
336 3, 2, 1
337 });
338
339 // Expected output is a single-batch, 1 channel, 3x3 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000340 armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000341
342 const std::vector<T> outputData =
343 {
344 23, 33, 24,
345 91, 99, 48,
346 26, 50, 19
347 };
348
349 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
350
351 uint32_t padLeft = 1;
352 uint32_t padTop = 1;
353 uint32_t padRight = 1;
354 uint32_t padBottom = 1;
355 uint32_t strideX = 2;
356 uint32_t strideY = 2;
357
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000358 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
359 workloadFactory,
360 memoryManager,
361 input,
362 kernel,
363 boost::multi_array<T, 1>(),
364 expectedOutput,
365 dataLayout,
366 qScale,
367 qOffset,
368 padLeft,
369 padTop,
370 padRight,
371 padBottom,
372 strideX,
373 strideY);
Mike Kelly7332ed82018-12-20 17:03:06 +0000374}
375
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000376LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
377 armnn::IWorkloadFactory& workloadFactory,
378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
379 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000380 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000381{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000382 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
383 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000384}
385
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000386LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
387 armnn::IWorkloadFactory& workloadFactory,
388 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
389 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000390 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000391{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000392 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
393 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000394}
395
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000396LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
397 armnn::IWorkloadFactory& workloadFactory,
398 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
399 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000400 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000401{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000402 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
403 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000404}
405
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000406LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
407 armnn::IWorkloadFactory& workloadFactory,
408 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
409 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100410{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000411 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
412 workloadFactory,
413 memoryManager,
414 0.f,
415 0,
416 biasEnabled,
417 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100418}
419
Mike Kelly7332ed82018-12-20 17:03:06 +0000420LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
421 armnn::IWorkloadFactory& workloadFactory,
422 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
423 bool biasEnabled,
424 const armnn::DataLayout layout)
425{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000426 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
427 workloadFactory,
428 memoryManager,
429 0.f,
430 0,
431 biasEnabled,
432 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000433}
434
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000435LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
436 armnn::IWorkloadFactory& workloadFactory,
437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
438 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000439 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000440{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000441 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
442 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000443}
444
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100445LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
446 armnn::IWorkloadFactory& workloadFactory,
447 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
448 bool biasEnabled,
449 const armnn::DataLayout layout)
450{
451return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
452 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
453}
454
455LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
456 armnn::IWorkloadFactory& workloadFactory,
457 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
458 bool biasEnabled,
459 const armnn::DataLayout layout)
460{
461 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
462 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
463}
464
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000465template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
466 typename T = armnn::ResolveType<ArmnnType>>
telsoa014fcda012018-03-09 14:13:49 +0000467LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
468 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000469 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000470 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000471 float qScale,
472 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000473{
telsoa01c577f2c2018-08-31 09:22:23 +0100474 // Use a single-batch 1-channel 3x3 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000475 armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000476 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
477 QuantizedVector<T>(qScale, qOffset, {
478 11,21,31,
479 12,22,32,
480 13,23,33
481 })));
482
telsoa01c577f2c2018-08-31 09:22:23 +0100483 // Use 1 batch of a 1-channel 2x2 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000484 armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000485 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
486 QuantizedVector<T>(qScale, qOffset, {
487 -11,-21,
488 -12,-22,
489 })));
490
telsoa01c577f2c2018-08-31 09:22:23 +0100491// Expected output is 1 batch of a 1-channel 6x8 image.
telsoa014fcda012018-03-09 14:13:49 +0000492// Manually calculated like this:
493//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
494//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
495//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
496//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
497//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
498//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
499//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000500 armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000501 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
502 QuantizedVector<T>(qScale, qOffset, {
503 0, 0, 0, 0, 0, 0,
504 -242, -594, -934, -372, 0, 0,
505 -495, -1190, -1850, -725, 0, 0,
506 -538, -1256, -1916, -748, 0, 0,
507 -273, -626, -946, -363, 0, 0,
508 0, 0, 0, 0, 0, 0,
509 0, 0, 0, 0, 0, 0,
510 0, 0, 0, 0, 0, 0
511 })));
512
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000513 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
514 workloadFactory,
515 memoryManager,
516 input,
517 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100518 GetBias2<ArmnnBType>(false, qScale * qScale),
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000519 expectedOutput,
520 qScale,
521 qOffset,
522 layout,
523 1, // Padding left.
524 2, // Padding top.
525 3, // Padding right.
526 4); // Padding bottom.
telsoa014fcda012018-03-09 14:13:49 +0000527}
528
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000529template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
530 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000531LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
532 armnn::IWorkloadFactory& workloadFactory,
533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000534 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000535 float qScale,
536 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000537{
telsoa01c577f2c2018-08-31 09:22:23 +0100538 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000539 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000540 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
541 QuantizedVector<T>(qScale, qOffset, {
542 11,21,31,41,51,
543 12,22,32,42,52,
544 13,23,33,43,53,
545 14,24,34,44,54,
546 15,25,35,45,55,
547 })));
548
telsoa01c577f2c2018-08-31 09:22:23 +0100549 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000550 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000551 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
552 QuantizedVector<T>(qScale, qOffset, {
553 -11,-21,-31,-41,
554 -12,-22,-32,-42,
555 -13,-23,-33,-43,
556 -14,-24,-34,-44,
557 })));
558
telsoa01c577f2c2018-08-31 09:22:23 +0100559 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000560 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000561 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
562 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
563 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000564 -7140, -10580, -13940, -9300, -5230,
565 -9590, -14120, -18520, -12290, -6860,
566 -9980, -14560, -18960, -12560, -7000,
567 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100568 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000569 })));
570
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000571 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
572 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000573 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000574 input,
575 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100576 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000577 expectedOutput,
578 qScale,
579 qOffset,
narpra015f703182018-10-26 16:24:58 +0100580 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100581 1, // Padding left.
582 1, // Padding top.
583 2, // Padding right.
584 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100585}
586
Teresa Charlinedeeb162019-06-14 11:09:19 +0100587LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
588 armnn::IWorkloadFactory& workloadFactory,
589 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
590 armnn::DataLayout layout)
591{
592 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
593 workloadFactory, memoryManager, layout, 0.0f, 0);
594}
595
596LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
597 armnn::IWorkloadFactory& workloadFactory,
598 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
599 armnn::DataLayout layout)
600{
601 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
602 <armnn::DataType::Float32, armnn::DataType::Float32>(
603 workloadFactory, memoryManager, layout, 0.0f, 0);
604}
605
606LayerTestResult<float, 4> Convolution1dTest(
607 armnn::IWorkloadFactory& workloadFactory,
608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
609 bool biasEnabled)
610{
611 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
612 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
613}
614
615LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
616 armnn::IWorkloadFactory& workloadFactory,
617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
618 bool biasEnabled)
619{
620 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
621 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
622}
623
624LayerTestResult<float,4> CompareConvolution2dTest(
625 armnn::IWorkloadFactory& workloadFactory,
626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
627 armnn::IWorkloadFactory& refWorkloadFactory)
628{
629 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
630 workloadFactory, memoryManager, refWorkloadFactory);
631}
632
633template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
634LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
635 armnn::IWorkloadFactory& workloadFactory,
636 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
637 const std::vector<float>& inputNoQuantizedValues,
638 armnn::TensorInfo& inputTensorInfo,
639 const std::vector<float>& kernelNoQuantizedValues,
640 armnn::TensorInfo& kernelTensorInfo,
641 const std::vector<float>& outputExpectedNoQuantizedValues,
642 armnn::TensorInfo& outputTensorInfo,
643 uint32_t dilationX,
644 uint32_t dilationY,
645 armnn::DataLayout layout = armnn::DataLayout::NCHW,
646 bool biasEnabled = false
647)
648{
649 float qScale;
650 int32_t qOffset;
651 switch (ArmnnType)
652 {
653 case armnn::DataType::QuantisedAsymm8:
654 {
655 qScale = 0.1f;
656 qOffset = 128;
657 break;
658 }
659 case armnn::DataType::QuantisedSymm16:
660 {
661 qScale = 0.1f;
662 qOffset = 0;
663 break;
664 }
665 case armnn::DataType::Float32:
666 default:
667 {
668 qScale = 0.f;
669 qOffset = 0;
670 break;
671 }
672 }
673
674 inputTensorInfo.SetQuantizationScale(qScale);
675 inputTensorInfo.SetQuantizationOffset(qOffset);
676 kernelTensorInfo.SetQuantizationScale(qScale);
677 kernelTensorInfo.SetQuantizationOffset(qOffset);
678 outputTensorInfo.SetQuantizationScale(qScale);
679 outputTensorInfo.SetQuantizationOffset(qOffset);
680
681 auto input = MakeTensor<T, 4>(inputTensorInfo,
682 std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
683 inputTensorInfo.GetQuantizationOffset(),
684 inputNoQuantizedValues)));
685 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
686 std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
687 kernelTensorInfo.GetQuantizationOffset(),
688 kernelNoQuantizedValues)));
689 auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
690 std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
691 outputTensorInfo.GetQuantizationOffset(),
692 outputExpectedNoQuantizedValues)));
693
694 uint32_t padLeft = 0;
695 uint32_t padTop = 0;
696 uint32_t padRight = 0;
697 uint32_t padBottom = 0;
698 uint32_t strideX = 1;
699 uint32_t strideY = 1;
700
701 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
702 workloadFactory,
703 memoryManager,
704 input,
705 kernel,
706 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
707 expectedOutput,
708 qScale,
709 qOffset,
710 layout,
711 padLeft,
712 padTop,
713 padRight,
714 padBottom,
715 strideX,
716 strideY,
717 dilationX,
718 dilationY);
719}
720
721template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
722LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
723 armnn::IWorkloadFactory& workloadFactory,
724 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
725 bool biasEnabled,
726 const armnn::DataLayout layout)
727{
728 armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
729 std::vector<float> inputNoQuantizedValues =
730 {
731 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
732 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
733 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
734 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
735 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
736 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
737 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
738 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
739 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
740 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
741 };
742
743 armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
744 std::vector<float> kernelNoQuantizedValues =
745 {
746 1, 2, 3,
747 4, 5, 6,
748 7, 8, 9
749 };
750
751 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
752 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
753 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
754 std::vector<float> outputExpectedNoQuantizedValues =
755 {
756 6., 5., 5., 5.,
757 6., 5., 5., 5.,
758 6., 5., 5., 5.,
759 3., 2., 2., 2.
760 };
761
762 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
763 workloadFactory,
764 memoryManager,
765 inputNoQuantizedValues,
766 inputTensorInfo,
767 kernelNoQuantizedValues,
768 kernelTensorInfo,
769 outputExpectedNoQuantizedValues,
770 outputTensorInfo,
771 3,
772 3,
773 layout,
774 biasEnabled);
775}
776
777template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
778LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
779 armnn::IWorkloadFactory& workloadFactory,
780 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
781 bool biasEnabled,
782 const armnn::DataLayout layout)
783{
784 armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
785 std::vector<float> inputNoQuantizedValues =
786 {
787 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
788 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
789 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
790 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
791 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
792 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
793 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
794 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
795 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
796 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
797
798 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
799 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
800 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
801 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
802 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
803 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
804 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
805 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
806 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
807 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
808 };
809
810 armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
811 std::vector<float> kernelNoQuantizedValues =
812 {
813 1, 2, 3,
814 4, 5, 6,
815 7, 8, 9,
816
817 1, 2, 3,
818 4, 5, 6,
819 7, 8, 9
820 };
821
822 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
823 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
824 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
825 std::vector<float> outputExpectedNoQuantizedValues =
826 {
827 12., 10., 10., 10.,
828 12., 10., 10., 10.,
829 12., 10., 10., 10.,
Teresa Charlin20b1f882019-06-19 09:34:37 +0100830 6., 4., 4., 4.
Teresa Charlinedeeb162019-06-14 11:09:19 +0100831 };
832
833 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
834 workloadFactory,
835 memoryManager,
836 inputNoQuantizedValues,
837 inputTensorInfo,
838 kernelNoQuantizedValues,
839 kernelTensorInfo,
840 outputExpectedNoQuantizedValues,
841 outputTensorInfo,
842 3,
843 3,
844 layout,
845 biasEnabled);
846}
847
// Explicit instantiations of the dilated-convolution tests for every data
// type combination exercised by the backend unit tests (Float32 uses a
// Float32 bias; the quantized types use a Signed32 bias).
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
889
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000890template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
891 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000892LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
893 armnn::IWorkloadFactory& workloadFactory,
894 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
895 float qScale,
896 int32_t qOffset,
897 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000898 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100899{
telsoa01c577f2c2018-08-31 09:22:23 +0100900 // Use a single-batch 2-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000901 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +0100902 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +0100903 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
904 {
surmeh013537c2c2018-05-18 16:31:43 +0100905 0, 1, 2, 3, 4,
906 5, 6, 7, 8, 9,
907 10, 11, 12, 13, 14,
908 15, 16, 17, 18, 19,
909 20, 21, 22, 23, 24,
910
911 25, 26, 27, 28, 29,
912 30, 31, 32, 33, 34,
913 35, 36, 37, 38, 39,
914 40, 41, 42, 43, 44,
915 45, 46, 47, 48, 49
916 })));
917
telsoa01c577f2c2018-08-31 09:22:23 +0100918 // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000919 armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +0100920 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +0100921 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
922 {
surmeh013537c2c2018-05-18 16:31:43 +0100923 32, 31, 30, 29,
924 28, 27, 26, 25,
925 24, 23, 22, 21,
926 20, 19, 18, 17,
927
928 16, 15, 14, 13,
929 12, 11, 10, 9,
930 8, 7, 6, 5,
931 4, 3, 2, 1
932 })));
933
telsoa01c577f2c2018-08-31 09:22:23 +0100934 // Expected output is 1 batch of a 2-channel 5x5 image.
935 // Calculated using the python tensorflow library with strideX=1, strideY=1.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000936 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +0100937 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +0100938 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
939 {
surmeh013537c2c2018-05-18 16:31:43 +0100940 1062, 1580, 1850, 1530, 1117,
941 2140, 3108, 3500, 2842, 2042,
942 3580, 5068, 5460, 4342, 3062,
943 3618, 5072, 5390, 4248, 2971,
944 3074, 4282, 4510, 3533, 2457,
Teresa Charlin20b1f882019-06-19 09:34:37 +0100945
surmeh013537c2c2018-05-18 16:31:43 +0100946 1550, 2284, 2362, 1955, 1428,
947 2910, 4206, 4342, 3528, 2536,
948 3390, 4886, 5022, 4068, 2916,
949 3566, 5056, 5182, 4133, 2922,
950 3100, 4352, 4452, 3517, 2465
951 })));
952
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000953 return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
954 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000955 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +0100956 input,
957 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100958 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
surmeh013537c2c2018-05-18 16:31:43 +0100959 expectedOutput,
960 qScale,
961 qOffset,
jimfly01382a91d2018-10-26 15:55:50 +0100962 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100963 1, // Padding left.
964 1, // Padding top.
965 2, // Padding right.
966 2, // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100967 1, // strideX
968 1); // strideY
telsoa014fcda012018-03-09 14:13:49 +0000969}
970
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000971template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
972 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000973LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
974 armnn::IWorkloadFactory& workloadFactory,
975 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
976 float qScale,
977 int32_t qOffset,
978 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100979{
Teresa Charlin20b1f882019-06-19 09:34:37 +0100980 auto layout = armnn::DataLayout::NHWC;
981
982 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100983 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +0100984 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
985 {
986 0, 1, 2, 3, 4,
987 5, 6, 7, 8, 9,
988 10, 11, 12, 13, 14,
989 15, 16, 17, 18, 19,
990 20, 21, 22, 23, 24,
Nikhil Rajcec6b652018-10-12 13:51:57 +0100991
Teresa Charlin20b1f882019-06-19 09:34:37 +0100992 25, 26, 27, 28, 29,
993 30, 31, 32, 33, 34,
994 35, 36, 37, 38, 39,
995 40, 41, 42, 43, 44,
996 45, 46, 47, 48, 49
Nikhil Rajcec6b652018-10-12 13:51:57 +0100997 })));
998
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000999 armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001000 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001001 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
1002 {
Matteo Martincigh747ef822018-12-18 09:26:39 +00001003 32, 31, 30, 29,
1004 28, 27, 26, 25,
1005 24, 23, 22, 21,
1006 20, 19, 18, 17,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001007
Matteo Martincigh747ef822018-12-18 09:26:39 +00001008 16, 15, 14, 13,
1009 12, 11, 10, 9,
1010 8, 7, 6, 5,
1011 4, 3, 2, 1
Nikhil Rajcec6b652018-10-12 13:51:57 +01001012 })));
1013
Teresa Charlin20b1f882019-06-19 09:34:37 +01001014 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001015 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001016 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1017 {
1018 1062, 1580, 1850, 1530, 1117,
1019 2140, 3108, 3500, 2842, 2042,
1020 3580, 5068, 5460, 4342, 3062,
1021 3618, 5072, 5390, 4248, 2971,
1022 3074, 4282, 4510, 3533, 2457,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001023
Teresa Charlin20b1f882019-06-19 09:34:37 +01001024 1550, 2284, 2362, 1955, 1428,
1025 2910, 4206, 4342, 3528, 2536,
1026 3390, 4886, 5022, 4068, 2916,
1027 3566, 5056, 5182, 4133, 2922,
1028 3100, 4352, 4452, 3517, 2465
Nikhil Rajcec6b652018-10-12 13:51:57 +01001029 })));
1030
Teresa Charlin20b1f882019-06-19 09:34:37 +01001031 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001032 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001033 memoryManager,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001034 input,
1035 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +01001036 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Nikhil Rajcec6b652018-10-12 13:51:57 +01001037 expectedOutput,
1038 qScale,
1039 qOffset,
Teresa Charlin20b1f882019-06-19 09:34:37 +01001040 layout,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001041 1, // Padding left.
1042 1, // Padding top.
1043 2, // Padding right.
1044 2, // Padding bottom.
1045 1, // strideX
1046 1); // strideY
1047}
1048
Bruno Goncalves22972f02019-04-26 21:03:24 -03001049template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
1050 typename T = armnn::ResolveType<ArmnnType>>
1051LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
1052 armnn::IWorkloadFactory& workloadFactory,
1053 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1054 float qScale,
1055 int32_t qOffset,
1056 bool biasEnabled)
1057{
Teresa Charlin20b1f882019-06-19 09:34:37 +01001058 auto layout = armnn::DataLayout::NHWC;
1059
1060 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001061 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001062 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
1063 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001064 0, 0, 0, 0, 0, 0, 0, 0, 0,
1065 0, 0, 0, 0, 0, 0, 0, 0, 0,
1066 0, 0, 0, 0, 0, 0, 0, 0, 0,
1067 0, 0, 0, 1, 1, 1, 0, 0, 0,
1068 0, 0, 0, 1, 1, 1, 0, 0, 0,
1069 0, 0, 0, 1, 1, 1, 0, 0, 0,
1070 0, 0, 0, 0, 0, 0, 0, 0, 0,
1071 0, 0, 0, 0, 0, 0, 0, 0, 0,
1072 0, 0, 0, 0, 0, 0, 0, 0, 0
1073 })));
1074
1075 armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1076 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001077 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
1078 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001079 1, 2, 3,
1080 4, 5, 6,
1081 7, 8, 9
1082 })));
1083
1084 uint32_t padLeft = 0;
1085 uint32_t padTop = 0;
1086 uint32_t padRight = 0;
1087 uint32_t padBottom = 0;
1088 uint32_t strideX = 1;
1089 uint32_t strideY = 1;
1090 uint32_t dilationX = 3;
1091 uint32_t dilationY = 3;
1092
1093 // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
Teresa Charlin20b1f882019-06-19 09:34:37 +01001094 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001095 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001096 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1097 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001098 5, 5, 5,
1099 5, 5, 5,
1100 5, 5, 5
1101 })));
1102
Teresa Charlin20b1f882019-06-19 09:34:37 +01001103 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
Bruno Goncalves22972f02019-04-26 21:03:24 -03001104 workloadFactory,
1105 memoryManager,
1106 input,
1107 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +01001108 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Bruno Goncalves22972f02019-04-26 21:03:24 -03001109 expectedOutput,
1110 qScale,
1111 qOffset,
Teresa Charlin20b1f882019-06-19 09:34:37 +01001112 layout,
Bruno Goncalves22972f02019-04-26 21:03:24 -03001113 padLeft,
1114 padTop,
1115 padRight,
1116 padBottom,
1117 strideX,
1118 strideY,
1119 dilationX,
1120 dilationY);
telsoa014fcda012018-03-09 14:13:49 +00001121}
1122
Teresa Charlin20b1f882019-06-19 09:34:37 +01001123
1124template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1125LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
1126 armnn::IWorkloadFactory& workloadFactory,
1127 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1128 const std::vector<float>& inputNoQuantizedValues,
1129 armnn::TensorInfo& inputTensorInfo,
1130 const std::vector<float>& kernelNoQuantizedValues,
1131 armnn::TensorInfo& kernelTensorInfo,
1132 const std::vector<float>& outputExpectedNoQuantizedValues,
1133 armnn::TensorInfo& outputTensorInfo,
1134 uint32_t dilationX,
1135 uint32_t dilationY,
1136 armnn::DataLayout layout = armnn::DataLayout::NCHW,
1137 bool biasEnabled = false)
1138{
1139 float qScale;
1140 int32_t qOffset;
1141 switch (ArmnnType)
1142 {
1143 case armnn::DataType::QuantisedAsymm8:
1144 {
1145 qScale = 0.1f;
1146 qOffset = 128;
1147 break;
1148 }
1149 case armnn::DataType::QuantisedSymm16:
1150 {
1151 qScale = 0.1f;
1152 qOffset = 0;
1153 break;
1154 }
1155 case armnn::DataType::Float32:
1156 default:
1157 {
1158 qScale = 0.f;
1159 qOffset = 0;
1160 break;
1161 }
1162 }
1163
1164 inputTensorInfo.SetQuantizationScale(qScale);
1165 inputTensorInfo.SetQuantizationOffset(qOffset);
1166 kernelTensorInfo.SetQuantizationScale(qScale);
1167 kernelTensorInfo.SetQuantizationOffset(qOffset);
1168 outputTensorInfo.SetQuantizationScale(qScale);
1169 outputTensorInfo.SetQuantizationOffset(qOffset);
1170
1171 auto input = MakeTensor<T, 4>(inputTensorInfo,
1172 std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
1173 inputTensorInfo.GetQuantizationOffset(),
1174 inputNoQuantizedValues)));
1175 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
1176 std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
1177 kernelTensorInfo.GetQuantizationOffset(),
1178 kernelNoQuantizedValues)));
1179 auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
1180 std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
1181 outputTensorInfo.GetQuantizationOffset(),
1182 outputExpectedNoQuantizedValues)));
1183
1184 uint32_t padLeft = 0;
1185 uint32_t padTop = 0;
1186 uint32_t padRight = 0;
1187 uint32_t padBottom = 0;
1188 uint32_t strideX = 1;
1189 uint32_t strideY = 1;
1190
1191 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1192 workloadFactory,
1193 memoryManager,
1194 input,
1195 kernel,
1196 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1197 expectedOutput,
1198 qScale,
1199 qOffset,
1200 layout,
1201 padLeft,
1202 padTop,
1203 padRight,
1204 padBottom,
1205 strideX,
1206 strideY,
1207 dilationX,
1208 dilationY);
1209}
1210
1211template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1212LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
1213 armnn::IWorkloadFactory& workloadFactory,
1214 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1215 bool biasEnabled,
1216 const armnn::DataLayout layout)
1217{
1218 armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
1219 std::vector<float> inputNoQuantizedValues =
1220 {
1221 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1222 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1223 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1224 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1225 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1226 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1227 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1228 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1229 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1230 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1231 };
1232
1233 armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1234 std::vector<float> kernelNoQuantizedValues =
1235 {
1236 1, 2, 3,
1237 4, 5, 6,
1238 7, 8, 9
1239 };
1240
1241 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
1242 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
1243 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1244 std::vector<float> outputExpectedNoQuantizedValues =
1245 {
1246 6., 5., 5., 5.,
1247 6., 5., 5., 5.,
1248 6., 5., 5., 5.,
1249 3., 2., 2., 2.
1250 };
1251
1252 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1253 workloadFactory,
1254 memoryManager,
1255 inputNoQuantizedValues,
1256 inputTensorInfo,
1257 kernelNoQuantizedValues,
1258 kernelTensorInfo,
1259 outputExpectedNoQuantizedValues,
1260 outputTensorInfo,
1261 3,
1262 3,
1263 layout,
1264 biasEnabled);
1265}
1266
1267template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1268LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
1269 armnn::IWorkloadFactory& workloadFactory,
1270 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1271 bool biasEnabled,
1272 const armnn::DataLayout layout)
1273{
1274 armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
1275 std::vector<float> inputNoQuantizedValues =
1276 {
1277 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1278 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1279 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1280 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1281 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1282 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1283 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1284 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1285 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1286 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1287
1288 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1289 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1290 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1291 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1292 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1293 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1294 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1295 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1296 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1297 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1298 };
1299
1300 armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
1301 std::vector<float> kernelNoQuantizedValues =
1302 {
1303 1, 2, 3,
1304 4, 5, 6,
1305 7, 8, 9,
1306
1307 1, 2, 3,
1308 4, 5, 6,
1309 7, 8, 9
1310 };
1311
1312 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
1313 // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
1314 armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
1315 std::vector<float> outputExpectedNoQuantizedValues =
1316 {
1317 6., 5., 5., 5.,
1318 6., 5., 5., 5.,
1319 6., 5., 5., 5.,
1320 3., 2., 2., 2.,
1321
1322 6., 5., 5., 5.,
1323 6., 5., 5., 5.,
1324 6., 5., 5., 5.,
1325 3., 2., 2., 2.
1326 };
1327
1328 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1329 workloadFactory,
1330 memoryManager,
1331 inputNoQuantizedValues,
1332 inputTensorInfo,
1333 kernelNoQuantizedValues,
1334 kernelTensorInfo,
1335 outputExpectedNoQuantizedValues,
1336 outputTensorInfo,
1337 3,
1338 3,
1339 layout,
1340 biasEnabled);
1341}
1342
1343
// Explicit instantiations of the dilated depthwise-convolution tests for
// every data type combination exercised by the backend unit tests (Float32
// uses a Float32 bias; the quantized types use a Signed32 bias).
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
1385
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001386LayerTestResult<float, 4> DepthwiseConvolution2dTest(
1387 armnn::IWorkloadFactory& workloadFactory,
1388 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1389 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001390 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001391{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001392 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001393 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001394}
1395
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001396LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
1397 armnn::IWorkloadFactory& workloadFactory,
1398 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1399 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +01001400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001401 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1402 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001403}
1404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001405LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
1406 armnn::IWorkloadFactory& workloadFactory,
1407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1408 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001409 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001410{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001411 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001412 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001413}
1414
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001415LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1416 armnn::IWorkloadFactory& workloadFactory,
1417 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1418 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001419 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +01001420{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001421 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001422 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +01001423}
1424
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001425LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1426 armnn::IWorkloadFactory& workloadFactory,
1427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1428 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001429 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001430{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001431 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001432 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001433}
1434
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001435LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1436 armnn::IWorkloadFactory& workloadFactory,
1437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1438 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001439 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001440{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001441 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001442 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001443}
1444
Bruno Goncalves22972f02019-04-26 21:03:24 -03001445LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1446 armnn::IWorkloadFactory& workloadFactory,
1447 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1448{
1449 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001450 workloadFactory,
1451 memoryManager,
1452 0.f,
1453 0,
1454 false);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001455}
1456
Ruomei Yan88d44b82019-05-23 14:29:06 +01001457LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1458 armnn::IWorkloadFactory& workloadFactory,
1459 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1460 bool biasEnabled,
1461 const armnn::DataLayout layout)
1462{
1463 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1464 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1465}
1466
1467LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1468 armnn::IWorkloadFactory& workloadFactory,
1469 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1470 bool biasEnabled,
1471 const armnn::DataLayout layout)
1472{
1473 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1474 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1475}
1476
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001477LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001478 armnn::IWorkloadFactory& workloadFactory,
1479 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1480 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +00001481 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001482{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001483 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1484 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +00001485}
1486
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001487LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1488 armnn::IWorkloadFactory& workloadFactory,
1489 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1490 armnn::IWorkloadFactory& refWorkloadFactory,
1491 const armnn::DataLayout layout)
1492{
1493 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1494 workloadFactory, memoryManager, refWorkloadFactory, layout);
1495}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001496
1497LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1498 armnn::IWorkloadFactory& workloadFactory,
1499 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001500{
1501 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1502 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001503 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001504}
1505
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001506LayerTestResult<float,4> SimpleNormalizationWithinTest(
1507 armnn::IWorkloadFactory& workloadFactory,
1508 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001509{
1510 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1511 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001512 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001513}
1514
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001515LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1516 armnn::IWorkloadFactory& workloadFactory,
1517 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001518{
1519 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1520 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001521 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001522}
1523
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001524LayerTestResult<float,2> SimpleSoftmaxTest(
1525 armnn::IWorkloadFactory& workloadFactory,
1526 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1527 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001528{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001529 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001530}
1531
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001532LayerTestResult<float,3> Simple3dSoftmaxTest(
1533 armnn::IWorkloadFactory& workloadFactory,
1534 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1535 float beta)
1536{
1537 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1538}
1539
1540LayerTestResult<float,4> Simple4dSoftmaxTest(
1541 armnn::IWorkloadFactory& workloadFactory,
1542 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1543 float beta)
1544{
1545 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1546}
1547
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001548LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1549 armnn::IWorkloadFactory& workloadFactory,
1550 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1551 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001552{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001553 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001554}
1555
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001556LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1557 armnn::IWorkloadFactory& workloadFactory,
1558 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1559 float beta)
1560{
1561 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1562}
1563
1564LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1565 armnn::IWorkloadFactory& workloadFactory,
1566 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1567 float beta)
1568{
1569 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1570}
1571
nikraj01248683f2019-05-29 16:46:50 +01001572LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1573 armnn::IWorkloadFactory& workloadFactory,
1574 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1575 float beta)
1576{
1577 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1578}
1579
1580LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1581 armnn::IWorkloadFactory& workloadFactory,
1582 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1583 float beta)
1584{
1585 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1586}
1587
1588LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
1589 armnn::IWorkloadFactory& workloadFactory,
1590 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1591 float beta)
1592{
1593 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1594}
1595
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001596LayerTestResult<float,4> CompareNormalizationTest(
1597 armnn::IWorkloadFactory& workloadFactory,
1598 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1599 armnn::IWorkloadFactory& refWorkloadFactory,
1600 armnn::NormalizationAlgorithmChannel normChannel,
1601 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00001602{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001603 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001604}
1605
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001606LayerTestResult<float,2> CompareSoftmaxTest(
1607 armnn::IWorkloadFactory& workloadFactory,
1608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001609 armnn::IWorkloadFactory& refWorkloadFactory,
1610 float beta)
1611{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001612 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
1613 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001614}
1615
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001616LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
1617 armnn::IWorkloadFactory& workloadFactory,
1618 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001619 armnn::IWorkloadFactory& refWorkloadFactory,
1620 float beta)
1621{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001622 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
1623 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001624}
1625
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001626std::vector<LayerTestResult<float,3>> SplitterTest(
1627 armnn::IWorkloadFactory& workloadFactory,
1628 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001629{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001630 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00001631}
1632
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001633std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
1634 armnn::IWorkloadFactory& workloadFactory,
1635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001636{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001637 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001638}
1639
Ruomei Yan25339c32019-05-28 16:48:20 +01001640std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
1641 armnn::IWorkloadFactory& workloadFactory,
1642 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1643{
1644 return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1645}
1646
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001647LayerTestResult<float, 3> CopyViaSplitterTest(
1648 armnn::IWorkloadFactory& workloadFactory,
1649 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001650{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001651 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001652}
1653
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001654LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1655 armnn::IWorkloadFactory& workloadFactory,
1656 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001657{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001658 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001659}
1660
Ruomei Yan25339c32019-05-28 16:48:20 +01001661LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
1662 armnn::IWorkloadFactory& workloadFactory,
1663 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1664{
1665 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1666}
1667
telsoa01c577f2c2018-08-31 09:22:23 +01001668LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001669 armnn::IWorkloadFactory& workloadFactory,
1670 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001671{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001672 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001673 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1674 { 2., 3., 3., 4. }));
1675
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001676 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001677 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1678 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1679 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01001680 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001681 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001682}
1683
// LSTM test (no CIFG, peephole enabled, projection enabled) in Float32.
// The hard-coded expected values are reference outputs for this fixed input.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input fixture: batch size 2, input size 5.
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    // Expected output fixture: batch size 2, projected output size 16.
    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
         0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1705
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001706LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
1707 armnn::IWorkloadFactory& workloadFactory,
1708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001709{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001710 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001711 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1712 {2., 3., 3., 4.}));
1713
1714
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001715 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001716 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1717 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1718 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
1719
Conor Kennedyb9971c92019-05-07 07:14:23 +01001720 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001721 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001722}
1723
Conor Kennedyb9971c92019-05-07 07:14:23 +01001724LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
1725 armnn::IWorkloadFactory& workloadFactory,
1726 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1727{
1728 const float qScale = 1.0f;
1729 const int32_t qOffset = 0;
1730
1731 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1732 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1733
1734 armnn::TensorInfo inputDesc({2, 2}, datatype);
1735 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
1736 std::vector<float>{2., 3., 3., 4.}));
1737
1738 armnn::TensorInfo outputDesc({2, 4}, datatype);
1739 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1740 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1741 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
1742
1743 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1744 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1745
1746}
1747
// LSTM test (CIFG enabled, peephole enabled, no projection) with QSymm16
// activations and QAsymm8 constant tensors.
LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Identity quantisation (scale 1, offset 0) so quantised values mirror the float data.
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    // Activations run in QSymm16; constants (weights/biases) are supplied as QAsymm8.
    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    // Input fixture: batch size 2, input size 2.
    armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
        std::vector<float>({ 2., 3., 3., 4. })));

    // Expected output fixture: batch size 2, output size 4 (same reference values
    // as the Float32 CIFG/peephole test above).
    armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
        {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
         -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));

    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
1771
// LSTM test (no CIFG, peephole enabled, projection enabled) with QSymm16
// activations and QAsymm8 constant tensors.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Scale 2 quantisation; offset 0.
    const float qScale = 2.0f;
    const int32_t qOffset = 0;

    // Activations run in QSymm16; constants (weights/biases) are supplied as QAsymm8.
    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    // Input fixture: batch size 2, input size 5.
    armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));

    // Expected output fixture: batch size 2, projected output size 16 (same
    // reference values as the Float32 projection test above).
    armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));

    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
1801
1802LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
1803 armnn::IWorkloadFactory& workloadFactory,
1804 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1805{
1806 const float qScale = 1.0f;
1807 const int32_t qOffset = 0;
1808
1809 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
1810
1811 armnn::TensorInfo inputDesc({2, 2}, datatype);
1812 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
1813 qOffset, std::vector<float>{2., 3., 3., 4.}));
1814
1815 armnn::TensorInfo outputDesc({2, 4}, datatype);
1816 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1817 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1818 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
1819
1820 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1821 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
1822}
1823
// Concatenates a [2,6,3] and a [1,6,3] Float32 tensor along dimension 0
// (channels) into a [3,6,3] output, driving the Concat workload directly.
// Uses backend sub-tensor views for the inputs when the factory supports them,
// otherwise standalone tensor handles plus explicit view origins.
// Note: memoryManager is not referenced in this test body.
LayerTestResult<float,3> ConcatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    // First input occupies channels [0,2) of the output.
    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    // Second input occupies channel [2,3) of the output.
    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    // Expected result: input1's two channels followed by input2's single channel.
    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
    {
        1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f,
        7.0f, 8.0f, 9.0f,
        10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f,
        16.0f, 17.0f, 18.0f,

        19.0f, 20.0f, 21.0f,
        22.0f, 23.0f, 24.0f,
        25.0f, 26.0f, 27.0f,
        28.0f, 29.0f, 30.0f,
        31.0f, 32.0f, 33.0f,
        34.0f, 35.0f, 36.0f,

        37.0f, 38.0f, 39.0f,
        40.0f, 41.0f, 42.0f,
        43.0f, 44.0f, 45.0f,
        46.0f, 47.0f, 48.0f,
        49.0f, 50.0f, 51.0f,
        52.0f, 53.0f, 54.0f,
    })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
        1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f,
        7.0f, 8.0f, 9.0f,
        10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f,
        16.0f, 17.0f, 18.0f,

        19.0f, 20.0f, 21.0f,
        22.0f, 23.0f, 24.0f,
        25.0f, 26.0f, 27.0f,
        28.0f, 29.0f, 30.0f,
        31.0f, 32.0f, 33.0f,
        34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
        37.0f, 38.0f, 39.0f,
        40.0f, 41.0f, 42.0f,
        43.0f, 44.0f, 45.0f,
        46.0f, 47.0f, 48.0f,
        49.0f, 50.0f, 51.0f,
        52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // Prefer sub-tensor views into the output so the concat is zero-copy on
    // backends that support it; otherwise use standalone handles.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    // Allocate before copying data in; configure before executing.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
1946
// Element-wise addition of two [2,2,2,3] Float32 tensors, driving the Addition
// workload directly and comparing against a precomputed expected tensor.
// Note: memoryManager is not referenced in this test body.
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    // Both inputs and the output share the same NCHW shape (no broadcasting here).
    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
    {
        0.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        1.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 1.0f, 2.0f,

        0.0f, 0.0f, 1.0f,
        0.2f, 1.0f, 2.0f,
    }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
    {
        1.0f, 2.0f, 1.0f,
        0.0f, 1.0f, 2.0f,

        1.0f, 2.0f, -2.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 0.0f, -3.0f,

        0.0f, 0.0f, 1.0f,
        0.7f, 1.0f, 5.0f,
    }));

    // Expected output is the element-wise sum of input1 and input2.
    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
    {
        1.0f, 4.0f, 2.0f,
        0.2f, 2.0f, 4.0f,

        2.0f, 4.0f, -1.0f,
        0.4f, 2.0f, 4.0f,

        0.0f, 4.0f, 2.0f,
        8.4f, 1.0f, -1.0f,

        0.0f, 0.0f, 2.0f,
        0.9f, 2.0f, 7.0f,
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate before copying data in; configure before executing.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2038
// Broadcasting addition: [1,3,2,1] + [1,1,2,3] -> [1,3,2,3], templated on the
// ArmNN data type. qScale/qOffset are applied to all tensors when T is a
// quantised type and ignored otherwise.
// Note: memoryManager is not referenced in this test body.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantisation parameters only apply to quantised data types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    // Expected output: input1 broadcast along the last axis, input2 broadcast
    // along the second axis, then summed element-wise.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate before copying data in; configure before executing.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2117
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002118template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002119LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
2120 armnn::IWorkloadFactory& workloadFactory,
2121 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002122 float qScale,
2123 int32_t qOffset)
2124{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002125 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2126 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
2127 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00002128
2129 if (armnn::IsQuantizedType<T>())
2130 {
2131 inputTensorInfo1.SetQuantizationScale(qScale);
2132 inputTensorInfo1.SetQuantizationOffset(qOffset);
2133 inputTensorInfo2.SetQuantizationScale(qScale);
2134 inputTensorInfo2.SetQuantizationOffset(qOffset);
2135 outputTensorInfo.SetQuantizationScale(qScale);
2136 outputTensorInfo.SetQuantizationOffset(qOffset);
2137 }
2138
2139 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2140 {
2141 0.0f, 1.0f, 2.0f,
2142 3.0f, 4.0f, 5.0f,
2143 6.0f, 7.0f, 8.0f,
2144 9.0f, 10.0f, 11.0f,
2145 12.0f, 13.0f, 14.0f,
2146 15.0f, 16.0f, 17.0f,
2147 }));
2148
2149 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2150 {
2151 0.5f,
2152 }));
2153
2154 LayerTestResult<T,4> ret(outputTensorInfo);
2155 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2156 {
2157 0.5f, 1.5f, 2.5f,
2158 3.5f, 4.5f, 5.5f,
2159 6.5f, 7.5f, 8.5f,
2160 9.5f, 10.5f, 11.5f,
2161 12.5f, 13.5f, 14.5f,
2162 15.5f, 16.5f, 17.5f,
2163 }));
2164
2165 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2166 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2167 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2168
2169 armnn::AdditionQueueDescriptor data;
2170 armnn::WorkloadInfo info;
2171 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2172 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2173 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2174
2175 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2176
2177 inputHandle1->Allocate();
2178 inputHandle2->Allocate();
2179 outputHandle->Allocate();
2180
2181 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2182 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2183
Derek Lambertif30f7d32019-04-09 10:25:02 +01002184 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002185 workload->Execute();
2186
2187 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2188
2189 return ret;
2190}
2191
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002192LayerTestResult<float, 4> AdditionBroadcastTest(
2193 armnn::IWorkloadFactory& workloadFactory,
2194 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002195{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002196 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
2197 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002198}
2199
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002200LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
2201 armnn::IWorkloadFactory& workloadFactory,
2202 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002203{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002204 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
2205 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002206}
2207
Sadik Armagan2999a022019-04-09 14:20:12 +01002208LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
2209 armnn::IWorkloadFactory& workloadFactory,
2210 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2211{
2212 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
2213 workloadFactory, memoryManager, 2.f, 0);
2214}
2215
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002216LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
2217 armnn::IWorkloadFactory& workloadFactory,
2218 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002219{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002220 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
2221 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002222}
2223
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002224LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
2225 armnn::IWorkloadFactory& workloadFactory,
2226 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002227{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002228 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
2229 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00002230}
2231
Sadik Armagan2999a022019-04-09 14:20:12 +01002232LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
2233 armnn::IWorkloadFactory& workloadFactory,
2234 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2235{
2236 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
2237 workloadFactory, memoryManager, 0.1333333f, 0);
2238}
2239
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002240LayerTestResult<float,4> CompareAdditionTest(
2241 armnn::IWorkloadFactory& workloadFactory,
2242 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2243 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00002244{
2245 unsigned int batchSize = 4;
2246 unsigned int channels = 1;
2247 unsigned int height = 2;
2248 unsigned int width = 3;
2249
2250 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2251 armnn::TensorInfo outputTensorInfo;
2252
2253 unsigned int shape[] = {batchSize, channels, height, width};
2254
2255 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2256 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2257 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2258
2259 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
2260 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
2261
2262 LayerTestResult<float,4> ret(outputTensorInfo);
2263
2264 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2265 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2266 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2267
2268 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
2269 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
2270 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
2271
2272 armnn::AdditionQueueDescriptor data;
2273 armnn::WorkloadInfo info;
2274 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2275 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2276 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2277
2278 armnn::AdditionQueueDescriptor refData = data;
2279 armnn::WorkloadInfo refInfo = info;
2280 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
2281 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
2282 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
2283
2284 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2285 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
2286
2287 inputHandle1->Allocate();
2288 inputHandle2->Allocate();
2289 outputHandle->Allocate();
2290 inputHandle1Ref->Allocate();
2291 inputHandle2Ref->Allocate();
2292 outputHandleRef->Allocate();
2293
2294 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2295 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2296 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
2297 CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
2298
Derek Lambertif30f7d32019-04-09 10:25:02 +01002299 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002300 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01002301 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002302 workloadRef->Execute();
2303
2304 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2305 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
2306
2307 return ret;
2308}
2309
surmeh01bceff2f2018-03-29 16:29:27 +01002310namespace {
Sadik Armagan2999a022019-04-09 14:20:12 +01002311template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002312LayerTestResult<T, 4> DivisionTestHelper(
2313 armnn::IWorkloadFactory& workloadFactory,
2314 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2315 const unsigned int shape0[4],
2316 const std::vector<T>& values0,
2317 float scale0,
2318 int32_t offset0,
2319 const unsigned int shape1[4],
2320 const std::vector<T> & values1,
2321 float scale1,
2322 int32_t offset1,
2323 const unsigned int outShape[4],
2324 const std::vector<T> & outValues,
2325 float outScale,
2326 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01002327{
Sadik Armagan2999a022019-04-09 14:20:12 +01002328 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
2329 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
2330 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002331
David Beck5cd01f32018-09-12 16:00:08 +01002332 inputTensorInfo0.SetQuantizationScale(scale0);
2333 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002334
David Beck5cd01f32018-09-12 16:00:08 +01002335 inputTensorInfo1.SetQuantizationScale(scale1);
2336 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002337
David Beck5cd01f32018-09-12 16:00:08 +01002338 outputTensorInfo.SetQuantizationScale(outScale);
2339 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002340
David Beck5cd01f32018-09-12 16:00:08 +01002341 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
2342 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002343
David Beck5cd01f32018-09-12 16:00:08 +01002344 LayerTestResult<T, 4> result(outputTensorInfo);
2345 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002346
David Beck5cd01f32018-09-12 16:00:08 +01002347 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2348 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2349 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002350
David Beck5cd01f32018-09-12 16:00:08 +01002351 armnn::DivisionQueueDescriptor data;
2352 armnn::WorkloadInfo info;
2353 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2354 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2355 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002356
David Beck5cd01f32018-09-12 16:00:08 +01002357 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002358
David Beck5cd01f32018-09-12 16:00:08 +01002359 inputHandle0->Allocate();
2360 inputHandle1->Allocate();
2361 outputHandle->Allocate();
2362
2363 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2364 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2365
Derek Lambertif30f7d32019-04-09 10:25:02 +01002366 workload->PostAllocationConfigure();
David Beck5cd01f32018-09-12 16:00:08 +01002367 workload->Execute();
2368
2369 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
2370
2371 return result;
2372}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002373} // anonymous namespace
2374
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002375LayerTestResult<float,4> DivisionByZeroTest(
2376 armnn::IWorkloadFactory& workloadFactory,
2377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002378{
2379 const unsigned int width = 2;
2380 const unsigned int height = 2;
2381 const unsigned int channelCount = 2;
2382 const unsigned int batchSize = 2;
2383
2384 unsigned int shape[] = { batchSize, channelCount, height, width };
2385
2386 std::vector<float> input0({
2387 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
2388 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
2389
2390 std::vector<float> input1({
2391 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
2392 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
2393
2394 std::vector<float> output({
2395 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
2396 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
2397
Sadik Armagan2999a022019-04-09 14:20:12 +01002398 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2399 memoryManager,
2400 shape, input0, 1.0f, 0,
2401 shape, input1, 1.0f, 0,
2402 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002403}
2404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002405LayerTestResult<float,4> DivisionTest(
2406 armnn::IWorkloadFactory& workloadFactory,
2407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002408{
2409 const unsigned int width = 2;
2410 const unsigned int height = 2;
2411 const unsigned int channelCount = 2;
2412 const unsigned int batchSize = 2;
2413
2414 unsigned int shape[] = { batchSize, channelCount, height, width };
2415
2416 std::vector<float> input0({
2417 2, 2, 2, 2, 3, 3, 3, 3,
2418 4, 4, 4, 4, 5, 5, 5, 5 });
2419
2420 std::vector<float> input1({
2421 1, 1, 1, 1, 2, 2, 2, 2,
2422 4, 4, 4, 4, 4, 4, 4, 4 });
2423
2424 std::vector<float> output({
2425 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
2426 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
2427
David Beck5cd01f32018-09-12 16:00:08 +01002428
Sadik Armagan2999a022019-04-09 14:20:12 +01002429 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2430 memoryManager,
2431 shape, input0, 1.0f, 0,
2432 shape, input1, 1.0f, 0,
2433 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002434}
2435
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002436LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
2437 armnn::IWorkloadFactory& workloadFactory,
2438 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002439{
2440 unsigned int shape0[] = { 1, 2, 2, 2 };
2441 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2442
2443 unsigned int shape1[] = { 1, 1, 1, 1 };
2444 std::vector<float> input1({ 2 });
2445
2446 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2447
David Beck5cd01f32018-09-12 16:00:08 +01002448
Sadik Armagan2999a022019-04-09 14:20:12 +01002449 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2450 memoryManager,
2451 shape0, input0, 1.0f, 0,
2452 shape1, input1, 1.0f, 0,
2453 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002454}
2455
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002456LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
2457 armnn::IWorkloadFactory& workloadFactory,
2458 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002459{
2460 unsigned int shape0[] = { 1, 3, 3, 2 };
2461 std::vector<float> input0({
2462 1, 4, 3, 8, 5, 12,
2463 7, 16, 9, 20, 11, 24,
2464 13, 28, 15, 32, 17, 36});
2465
2466 unsigned int shape1[] = { 1, 1, 1, 2 };
2467 std::vector<float> input1({ 1, 2 });
2468
2469 std::vector<float> output({
2470 1, 2, 3, 4, 5, 6,
2471 7, 8, 9, 10, 11, 12,
2472 13, 14, 15, 16, 17, 18});
2473
Sadik Armagan2999a022019-04-09 14:20:12 +01002474 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2475 memoryManager,
2476 shape0, input0, 1.0f, 0,
2477 shape1, input1, 1.0f, 0,
2478 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002479}
2480
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002481LayerTestResult<uint8_t,4> DivisionUint8Test(
2482 armnn::IWorkloadFactory& workloadFactory,
2483 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002484{
2485 const unsigned int width = 2;
2486 const unsigned int height = 2;
2487 const unsigned int channelCount = 2;
2488 const unsigned int batchSize = 2;
2489
2490 unsigned int shape[] = { batchSize, channelCount, height, width };
2491
2492 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2493 4, 4, 4, 4, 5, 5, 5, 5 });
2494
2495 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2496 4, 4, 4, 4, 4, 4, 4, 4 });
2497
2498 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2499 4, 4, 4, 4, 5, 5, 5, 5});
2500
2501
Sadik Armagan2999a022019-04-09 14:20:12 +01002502 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2503 memoryManager,
2504 shape, input0, 1.0f, 0,
2505 shape, input1, 1.0f, 0,
2506 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002507}
2508
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002509LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
2510 armnn::IWorkloadFactory& workloadFactory,
2511 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002512{
2513 unsigned int shape0[] = { 1, 2, 2, 2 };
2514 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2515
2516 unsigned int shape1[] = { 1, 1, 1, 1 };
2517 std::vector<uint8_t> input1({ 2 });
2518
2519 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2520
Sadik Armagan2999a022019-04-09 14:20:12 +01002521 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2522 memoryManager,
2523 shape0, input0, 1.0f, 0,
2524 shape1, input1, 1.0f, 0,
2525 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002526}
2527
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002528LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
2529 armnn::IWorkloadFactory& workloadFactory,
2530 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002531{
2532 unsigned int shape0[] = { 1, 3, 3, 2 };
2533 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
2534 7, 16, 9, 20, 11, 24,
2535 13, 28, 15, 32, 17, 36});
2536
2537 unsigned int shape1[] = { 1, 1, 1, 2 };
2538 std::vector<uint8_t> input1({ 1, 2 });
2539
2540 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
2541 7, 8, 9, 10, 11, 12,
2542 13, 14, 15, 16, 17, 18});
2543
Sadik Armagan2999a022019-04-09 14:20:12 +01002544 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2545 memoryManager,
2546 shape0, input0, 1.0f, 0,
2547 shape1, input1, 1.0f, 0,
2548 shape0, output, 1.0f, 0);
2549}
2550
2551LayerTestResult<int16_t,4> DivisionInt16Test(
2552 armnn::IWorkloadFactory& workloadFactory,
2553 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2554{
2555 unsigned int shape[] = { 2, 2, 2, 2 };
2556
2557 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2558 4, 4, 4, 4, 5, 5, 5, 5 });
2559
2560 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2561 4, 4, 4, 4, 4, 4, 4, 4 });
2562
2563 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2564 4, 4, 4, 4, 5, 5, 5, 5});
2565
2566
2567 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2568 memoryManager,
2569 shape, input0, 1.0f, 0,
2570 shape, input1, 1.0f, 0,
2571 shape, output, 0.25f, 0);
2572}
2573
2574LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
2575 armnn::IWorkloadFactory& workloadFactory,
2576 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2577{
2578 unsigned int shape0[] = { 1, 2, 2, 2 };
2579 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2580
2581 unsigned int shape1[] = { 1, 1, 1, 1 };
2582 std::vector<int16_t> input1({ 2 });
2583
2584 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2585
2586 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2587 memoryManager,
2588 shape0, input0, 1.0f, 0,
2589 shape1, input1, 1.0f, 0,
2590 shape0, output, 1.0f, 0);
2591}
2592
2593LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
2594 armnn::IWorkloadFactory& workloadFactory,
2595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2596{
2597 unsigned int shape0[] = { 1, 3, 3, 2 };
2598 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
2599 7, 16, 9, 20, 11, 24,
2600 13, 28, 15, 32, 17, 36});
2601
2602 unsigned int shape1[] = { 1, 1, 1, 2 };
2603 std::vector<int16_t> input1({ 1, 2 });
2604
2605 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
2606 7, 8, 9, 10, 11, 12,
2607 13, 14, 15, 16, 17, 18});
2608
2609 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2610 memoryManager,
2611 shape0, input0, 1.0f, 0,
2612 shape1, input1, 1.0f, 0,
2613 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002614}
2615
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002616template<typename DescriptorType>
2617std::unique_ptr<armnn::IWorkload> CreateWorkload(
2618 const armnn::IWorkloadFactory& workloadFactory,
2619 const armnn::WorkloadInfo& info,
2620 const DescriptorType& descriptor)
2621{
2622 return CreateWorkload(workloadFactory, info, descriptor);
2623};
2624
// CreateWorkload specialisation for Maximum: delegates to IWorkloadFactory::CreateMaximum.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MaximumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMaximum(descriptor, info);
}
2633
// CreateWorkload specialisation for Minimum: delegates to IWorkloadFactory::CreateMinimum.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MinimumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMinimum(descriptor, info);
}
2642
// CreateWorkload specialisation for Equal: delegates to IWorkloadFactory::CreateEqual.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::EqualQueueDescriptor& descriptor)
{
    return workloadFactory.CreateEqual(descriptor, info);
}
2651
// CreateWorkload specialisation for Greater: delegates to IWorkloadFactory::CreateGreater.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::GreaterQueueDescriptor& descriptor)
{
    return workloadFactory.CreateGreater(descriptor, info);
}
2660
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002661namespace {
kevmay012b4d88e2019-01-24 14:05:09 +00002662
2663template <typename Descriptor,
2664 armnn::DataType ArmnnTypeInput,
2665 armnn::DataType ArmnnTypeOutput,
2666 typename TInput = armnn::ResolveType<ArmnnTypeInput>,
2667 typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
2668LayerTestResult<TOutput, 4> ElementwiseTestHelper(
2669 armnn::IWorkloadFactory & workloadFactory,
2670 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
2671 const unsigned int shape0[4], std::vector<TInput> values0,
2672 const unsigned int shape1[4], std::vector<TInput> values1,
2673 const unsigned int outShape[4], std::vector<TOutput> outValues,
2674 float qScale = 0.0f, int qOffset = 0)
2675{
2676 const size_t dimensionCount = 4;
2677 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
2678 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
2679 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};
2680
2681 auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
2682 auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);
2683
2684 if (armnn::IsQuantizedType<TInput>())
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002685 {
kevmay012b4d88e2019-01-24 14:05:09 +00002686 inputTensorInfo0.SetQuantizationScale(qScale);
2687 inputTensorInfo0.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002688
kevmay012b4d88e2019-01-24 14:05:09 +00002689 inputTensorInfo1.SetQuantizationScale(qScale);
2690 inputTensorInfo1.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002691
kevmay012b4d88e2019-01-24 14:05:09 +00002692 outputTensorInfo.SetQuantizationScale(qScale);
2693 outputTensorInfo.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002694 }
kevmay012b4d88e2019-01-24 14:05:09 +00002695
2696 LayerTestResult<TOutput,4> ret(outputTensorInfo);
2697
2698 if(ArmnnTypeOutput == armnn::DataType::Boolean)
2699 {
2700 ret.compareBoolean = true;
2701 }
2702
2703 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2704 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2705 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2706
2707 Descriptor data;
2708 armnn::WorkloadInfo info;
2709 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2710 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2711 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2712 auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
2713
2714 inputHandle0->Allocate();
2715 inputHandle1->Allocate();
2716 outputHandle->Allocate();
2717
2718 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2719 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2720
Derek Lambertif30f7d32019-04-09 10:25:02 +01002721 workload->PostAllocationConfigure();
kevmay012b4d88e2019-01-24 14:05:09 +00002722 ExecuteWorkload(*workload, memoryManager);
2723
2724 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2725
2726 ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
2727 return ret;
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002728}
2729
kevmay012b4d88e2019-01-24 14:05:09 +00002730template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
2731LayerTestResult<T, 4> ElementwiseTestHelper(
2732 armnn::IWorkloadFactory & workloadFactory,
2733 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
2734 const unsigned int shape0[4], std::vector<T> values0,
2735 const unsigned int shape1[4], std::vector<T> values1,
2736 const unsigned int outShape[4], std::vector<T> outValues,
2737 float qScale = 0.0f, int qOffset = 0)
2738{
2739 return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
2740 (workloadFactory,
2741 memoryManager,
2742 shape0,
2743 values0,
2744 shape1,
2745 values1,
2746 outShape,
2747 outValues,
2748 qScale,
2749 qOffset);
2750}
2751}
2752
2753LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2754 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002755{
2756 const unsigned int width = 2;
2757 const unsigned int height = 2;
2758 const unsigned int channelCount = 2;
2759 const unsigned int batchSize = 2;
2760
2761 unsigned int shape[] = { batchSize, channelCount, height, width };
2762
2763 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2764 3, 3, 3, 3, 4, 4, 4, 4 });
2765
2766 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2767 5, 5, 5, 5, 4, 4, 4, 4 });
2768
kevmay012b4d88e2019-01-24 14:05:09 +00002769 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
2770 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002771
kevmay012b4d88e2019-01-24 14:05:09 +00002772 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002773 workloadFactory,
2774 memoryManager,
2775 shape,
2776 input0,
2777 shape,
2778 input1,
2779 shape,
2780 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002781}
2782
kevmay012b4d88e2019-01-24 14:05:09 +00002783LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002784 armnn::IWorkloadFactory& workloadFactory,
2785 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2786{
2787 unsigned int shape0[] = { 1, 2, 2, 2 };
2788 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2789
2790 unsigned int shape1[] = { 1, 1, 1, 1 };
2791 std::vector<float> input1({ 1 });
2792
kevmay012b4d88e2019-01-24 14:05:09 +00002793 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002794
kevmay012b4d88e2019-01-24 14:05:09 +00002795 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002796 workloadFactory,
2797 memoryManager,
2798 shape0,
2799 input0,
2800 shape1,
2801 input1,
2802 shape0,
2803 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002804}
2805
kevmay012b4d88e2019-01-24 14:05:09 +00002806LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002807 armnn::IWorkloadFactory& workloadFactory,
2808 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2809{
2810 const unsigned int shape0[] = { 1, 2, 2, 3 };
2811 const unsigned int shape1[] = { 1, 1, 1, 3 };
2812
2813 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2814 7, 8, 9, 10, 11, 12 });
2815
2816 std::vector<float> input1({ 1, 2, 3});
2817
kevmay012b4d88e2019-01-24 14:05:09 +00002818 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
2819 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002820
kevmay012b4d88e2019-01-24 14:05:09 +00002821 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002822 workloadFactory,
2823 memoryManager,
2824 shape0,
2825 input0,
2826 shape1,
2827 input1,
2828 shape0,
2829 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002830}
2831
2832LayerTestResult<uint8_t, 4> EqualUint8Test(
2833 armnn::IWorkloadFactory& workloadFactory,
2834 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2835{
2836 unsigned int shape[] = { 2, 2, 2, 2 };
2837
2838 // See dequantized values to the right.
2839 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002840 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002841
2842 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2843 3, 3, 3, 3, 5, 5, 5, 5 });
2844
2845 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2846 1, 1, 1, 1, 0, 0, 0, 0 });
2847
kevmay012b4d88e2019-01-24 14:05:09 +00002848 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2849 armnn::DataType::QuantisedAsymm8,
2850 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002851 workloadFactory,
2852 memoryManager,
2853 shape,
2854 input0,
2855 shape,
2856 input1,
2857 shape,
2858 output,
2859 1.0f,
2860 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002861}
2862
2863LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
2864 armnn::IWorkloadFactory& workloadFactory,
2865 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2866{
2867 const unsigned int shape0[] = { 1, 2, 2, 3 };
2868 const unsigned int shape1[] = { 1, 1, 1, 1 };
2869
2870 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2871 7, 8, 9, 10, 11, 12 });
2872
2873 std::vector<uint8_t> input1({ 1 });
2874
2875 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
2876 0, 0, 0, 0, 0, 0 });
2877
kevmay012b4d88e2019-01-24 14:05:09 +00002878 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2879 armnn::DataType::QuantisedAsymm8,
2880 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002881 workloadFactory,
2882 memoryManager,
2883 shape0,
2884 input0,
2885 shape1,
2886 input1,
2887 shape0,
2888 output,
2889 1.0f,
2890 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002891}
2892
2893LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
2894 armnn::IWorkloadFactory& workloadFactory,
2895 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2896{
2897 const unsigned int shape0[] = { 1, 2, 2, 3 };
2898 const unsigned int shape1[] = { 1, 1, 1, 3 };
2899
2900 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2901 7, 8, 9, 10, 11, 12 });
2902
2903 std::vector<uint8_t> input1({ 1, 1, 3});
2904
2905 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
2906 0, 0, 0, 0, 0, 0 });
2907
kevmay012b4d88e2019-01-24 14:05:09 +00002908 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2909 armnn::DataType::QuantisedAsymm8,
2910 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002911 workloadFactory,
2912 memoryManager,
2913 shape0,
2914 input0,
2915 shape1,
2916 input1,
2917 shape0,
2918 output,
2919 1.0f,
2920 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002921}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002922
kevmay012b4d88e2019-01-24 14:05:09 +00002923LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00002924 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2925{
2926 const unsigned int width = 2;
2927 const unsigned int height = 2;
2928 const unsigned int channelCount = 2;
2929 const unsigned int batchSize = 2;
2930
2931 unsigned int shape[] = { batchSize, channelCount, height, width };
2932
2933 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2934 3, 3, 3, 3, 4, 4, 4, 4 });
2935
2936 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2937 5, 5, 5, 5, 4, 4, 4, 4 });
2938
kevmay012b4d88e2019-01-24 14:05:09 +00002939 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2940 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002941
kevmay012b4d88e2019-01-24 14:05:09 +00002942 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002943 workloadFactory,
2944 memoryManager,
2945 shape,
2946 input0,
2947 shape,
2948 input1,
2949 shape,
2950 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002951}
2952
kevmay012b4d88e2019-01-24 14:05:09 +00002953LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002954 armnn::IWorkloadFactory& workloadFactory,
2955 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2956{
2957 unsigned int shape0[] = { 1, 2, 2, 2 };
2958 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2959
2960 unsigned int shape1[] = { 1, 1, 1, 1 };
2961 std::vector<float> input1({ 1 });
2962
kevmay012b4d88e2019-01-24 14:05:09 +00002963 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00002964
kevmay012b4d88e2019-01-24 14:05:09 +00002965 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002966 workloadFactory,
2967 memoryManager,
2968 shape0,
2969 input0,
2970 shape1,
2971 input1,
2972 shape0,
2973 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002974}
2975
kevmay012b4d88e2019-01-24 14:05:09 +00002976LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002977 armnn::IWorkloadFactory& workloadFactory,
2978 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2979{
2980 const unsigned int shape0[] = { 1, 2, 2, 3 };
2981 const unsigned int shape1[] = { 1, 1, 1, 3 };
2982
2983 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
2984 7, 8, 9, 10, 11, 12 });
2985
2986 std::vector<float> input1({ 1, 3, 2});
2987
kevmay012b4d88e2019-01-24 14:05:09 +00002988 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
2989 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002990
kevmay012b4d88e2019-01-24 14:05:09 +00002991 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002992 workloadFactory,
2993 memoryManager,
2994 shape0,
2995 input0,
2996 shape1,
2997 input1,
2998 shape0,
2999 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003000}
3001
3002LayerTestResult<uint8_t, 4> GreaterUint8Test(
3003 armnn::IWorkloadFactory& workloadFactory,
3004 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3005{
3006 unsigned int shape[] = { 2, 2, 2, 2 };
3007
3008 // See dequantized values to the right.
3009 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3010 3, 3, 3, 3, 5, 5, 5, 5 });
3011
3012 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3013 2, 2, 2, 2, 5, 5, 5, 5 });
3014
3015 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
3016 1, 1, 1, 1, 0, 0, 0, 0 });
3017
kevmay012b4d88e2019-01-24 14:05:09 +00003018 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3019 armnn::DataType::QuantisedAsymm8,
3020 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003021 workloadFactory,
3022 memoryManager,
3023 shape,
3024 input0,
3025 shape,
3026 input1,
3027 shape,
3028 output,
3029 1.0f,
3030 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003031}
3032
3033LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
3034 armnn::IWorkloadFactory& workloadFactory,
3035 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3036{
3037 const unsigned int shape0[] = { 1, 2, 2, 3 };
3038 const unsigned int shape1[] = { 1, 1, 1, 1 };
3039
3040 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3041 7, 8, 9, 10, 11, 12 });
3042
3043 std::vector<uint8_t> input1({ 1 });
3044
3045 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
3046 1, 1, 1, 1, 1, 1 });
3047
kevmay012b4d88e2019-01-24 14:05:09 +00003048 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3049 armnn::DataType::QuantisedAsymm8,
3050 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003051 workloadFactory,
3052 memoryManager,
3053 shape0,
3054 input0,
3055 shape1,
3056 input1,
3057 shape0,
3058 output,
3059 1.0f,
3060 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003061}
3062
3063LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
3064 armnn::IWorkloadFactory& workloadFactory,
3065 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3066{
3067 const unsigned int shape0[] = { 1, 2, 2, 3 };
3068 const unsigned int shape1[] = { 1, 1, 1, 3 };
3069
3070 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3071 7, 8, 9, 10, 11, 12 });
3072
3073 std::vector<uint8_t> input1({ 1, 1, 3});
3074
3075 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
3076 1, 1, 1, 1, 1, 1 });
3077
kevmay012b4d88e2019-01-24 14:05:09 +00003078 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3079 armnn::DataType::QuantisedAsymm8,
3080 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003081 workloadFactory,
3082 memoryManager,
3083 shape0,
3084 input0,
3085 shape1,
3086 input1,
3087 shape0,
3088 output,
3089 1.0f,
3090 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003091}
3092
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003093LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3094 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3095{
3096 const unsigned int width = 2;
3097 const unsigned int height = 2;
3098 const unsigned int channelCount = 2;
3099 const unsigned int batchSize = 2;
3100
3101 unsigned int shape[] = { batchSize, channelCount, height, width };
3102
3103 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3104 3, 3, 3, 3, 4, 4, 4, 4 });
3105
3106 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3107 4, 4, 4, 4, 5, 5, 5, 5 });
3108
3109 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
3110 4, 4, 4, 4, 5, 5, 5, 5 });
3111
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003112 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3113 workloadFactory,
3114 memoryManager,
3115 shape,
3116 input0,
3117 shape,
3118 input1,
3119 shape,
3120 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003121}
3122
3123LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
3124 armnn::IWorkloadFactory& workloadFactory,
3125 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3126{
3127 unsigned int shape0[] = { 1, 2, 2, 2 };
3128 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3129
3130 unsigned int shape1[] = { 1, 1, 1, 1 };
3131 std::vector<float> input1({ 2 });
3132
3133 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
3134
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003135 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3136 workloadFactory,
3137 memoryManager,
3138 shape0,
3139 input0,
3140 shape1,
3141 input1,
3142 shape0,
3143 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003144}
3145
3146LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
3147 armnn::IWorkloadFactory& workloadFactory,
3148 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3149{
3150 const unsigned int shape0[] = { 1, 2, 2, 3 };
3151 const unsigned int shape1[] = { 1, 1, 1, 3 };
3152
3153 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3154 7, 8, 9, 10, 11, 12 });
3155
3156 std::vector<float> input1({ 1, 2, 3});
3157
3158 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003159 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003160
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003161 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3162 workloadFactory,
3163 memoryManager,
3164 shape0,
3165 input0,
3166 shape1,
3167 input1,
3168 shape0,
3169 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003170}
3171
3172LayerTestResult<uint8_t, 4> MaximumUint8Test(
3173 armnn::IWorkloadFactory& workloadFactory,
3174 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3175{
3176 unsigned int shape[] = { 2, 2, 2, 2 };
3177
3178 // See dequantized values to the right.
3179 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3180 3, 3, 3, 3, 4, 4, 4, 4 });
3181
3182 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3183 4, 4, 4, 4, 5, 5, 5, 5 });
3184
3185 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3186 4, 4, 4, 4, 5, 5, 5, 5 });
3187
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003188 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3189 workloadFactory,
3190 memoryManager,
3191 shape,
3192 input0,
3193 shape,
3194 input1,
3195 shape,
3196 output,
3197 1.0f,
3198 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003199}
3200
3201LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
3202 armnn::IWorkloadFactory& workloadFactory,
3203 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3204{
3205 const unsigned int shape0[] = { 1, 2, 2, 3 };
3206 const unsigned int shape1[] = { 1, 1, 1, 1 };
3207
3208 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3209 7, 8, 9, 10, 11, 12 });
3210
3211 std::vector<uint8_t> input1({2});
3212
3213 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
3214 7, 8, 9, 10, 11, 12 });
3215
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003216 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3217 workloadFactory,
3218 memoryManager,
3219 shape0,
3220 input0,
3221 shape1,
3222 input1,
3223 shape0,
3224 output,
3225 1.0f,
3226 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003227}
3228
3229LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
3230 armnn::IWorkloadFactory& workloadFactory,
3231 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3232{
3233 const unsigned int shape0[] = { 1, 2, 2, 3 };
3234 const unsigned int shape1[] = { 1, 1, 1, 3 };
3235
3236 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3237 7, 8, 9, 10, 11, 12 });
3238
3239 std::vector<uint8_t> input1({ 1, 10, 3});
3240
3241 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
3242 7, 10, 9, 10, 11, 12 });
3243
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003244 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3245 workloadFactory,
3246 memoryManager,
3247 shape0,
3248 input0,
3249 shape1,
3250 input1,
3251 shape0,
3252 output,
3253 1.0f,
3254 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003255}
3256
Sadik Armagan2999a022019-04-09 14:20:12 +01003257LayerTestResult<int16_t, 4> MaximumInt16Test(
3258 armnn::IWorkloadFactory& workloadFactory,
3259 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3260{
3261 unsigned int shape[] = { 2, 2, 2, 2 };
3262
3263 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3264 3, 3, 3, 3, 4, 4, 4, 4 });
3265
3266 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3267 4, 4, 4, 4, 5, 5, 5, 5 });
3268
3269 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3270 4, 4, 4, 4, 5, 5, 5, 5 });
3271
3272 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3273 workloadFactory,
3274 memoryManager,
3275 shape,
3276 input0,
3277 shape,
3278 input1,
3279 shape,
3280 output,
3281 1.0f,
3282 0);
3283}
3284
3285LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
3286 armnn::IWorkloadFactory& workloadFactory,
3287 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3288{
3289 const unsigned int shape0[] = { 1, 2, 2, 3 };
3290 const unsigned int shape1[] = { 1, 1, 1, 1 };
3291
3292 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3293 7, 8, 9, 10, 11, 12 });
3294
3295 std::vector<int16_t> input1({2});
3296
3297 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
3298 7, 8, 9, 10, 11, 12 });
3299
3300 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3301 workloadFactory,
3302 memoryManager,
3303 shape0,
3304 input0,
3305 shape1,
3306 input1,
3307 shape0,
3308 output,
3309 1.0f,
3310 0);
3311}
3312
3313LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
3314 armnn::IWorkloadFactory& workloadFactory,
3315 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3316{
3317 const unsigned int shape0[] = { 1, 2, 2, 3 };
3318 const unsigned int shape1[] = { 1, 1, 1, 3 };
3319
3320 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3321 7, 8, 9, 10, 11, 12 });
3322
3323 std::vector<int16_t> input1({ 1, 10, 3});
3324
3325 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
3326 7, 10, 9, 10, 11, 12 });
3327
3328 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3329 workloadFactory,
3330 memoryManager,
3331 shape0,
3332 input0,
3333 shape1,
3334 input1,
3335 shape0,
3336 output,
3337 1.0f,
3338 0);
3339}
3340
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003341LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
3342 armnn::IWorkloadFactory& workloadFactory,
3343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3344{
3345 unsigned int shape0[] = { 1, 2, 2, 2 };
3346 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3347
3348 unsigned int shape1[] = { 1, 1, 1, 1 };
3349 std::vector<float> input1({ 2 });
3350
3351 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
3352
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003353 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3354 workloadFactory,
3355 memoryManager,
3356 shape0,
3357 input0,
3358 shape1,
3359 input1,
3360 shape0,
3361 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003362}
3363
3364
3365LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
3366 armnn::IWorkloadFactory& workloadFactory,
3367 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3368{
3369 unsigned int shape0[] = { 1, 2, 2, 2 };
3370 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
3371
3372 unsigned int shape1[] = { 1, 1, 1, 1 };
3373 std::vector<float> input1({ 5 });
3374
3375 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
3376
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003377 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3378 workloadFactory,
3379 memoryManager,
3380 shape0,
3381 input0,
3382 shape1,
3383 input1,
3384 shape0,
3385 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003386}
3387
3388LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
3389 armnn::IWorkloadFactory & workloadFactory,
3390 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
3391{
3392 const unsigned int shape0[] = { 1, 2, 2, 3 };
3393 const unsigned int shape1[] = { 1, 1, 1, 3 };
3394
3395 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
3396 7, 1, 2, 3, 4, 5 });
3397
3398 std::vector<uint8_t> input1({ 1, 2, 3});
3399
3400 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
3401 1, 1, 2, 1, 2, 3 });
3402
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003403 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3404 workloadFactory,
3405 memoryManager,
3406 shape0,
3407 input0,
3408 shape1,
3409 input1,
3410 shape0,
3411 output,
3412 1.0f,
3413 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003414}
3415
Sadik Armagan2999a022019-04-09 14:20:12 +01003416LayerTestResult<int16_t, 4> MinimumInt16Test(
3417 armnn::IWorkloadFactory& workloadFactory,
3418 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3419{
3420 unsigned int shape[] = { 2, 2, 2, 2 };
3421
3422 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3423 3, 3, 3, 3, 4, 4, 4, 4 });
3424
3425 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3426 4, 4, 4, 4, 5, 5, 5, 5 });
3427
3428 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
3429 3, 3, 3, 3, 4, 4, 4, 4 });
3430
3431 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3432 workloadFactory,
3433 memoryManager,
3434 shape,
3435 input0,
3436 shape,
3437 input1,
3438 shape,
3439 output,
3440 1.0f,
3441 0);
3442}
3443
3444LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
3445 armnn::IWorkloadFactory& workloadFactory,
3446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3447{
3448 const unsigned int shape0[] = { 1, 2, 2, 3 };
3449 const unsigned int shape1[] = { 1, 1, 1, 1 };
3450
3451 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3452 7, 8, 9, 10, 11, 12 });
3453
3454 std::vector<int16_t> input1({2});
3455
3456 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
3457 2, 2, 2, 2, 2, 2 });
3458
3459 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3460 workloadFactory,
3461 memoryManager,
3462 shape0,
3463 input0,
3464 shape1,
3465 input1,
3466 shape0,
3467 output,
3468 1.0f,
3469 0);
3470}
3471
3472LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
3473 armnn::IWorkloadFactory& workloadFactory,
3474 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3475{
3476 const unsigned int shape0[] = { 1, 2, 2, 3 };
3477 const unsigned int shape1[] = { 1, 1, 1, 3 };
3478
3479 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3480 7, 8, 9, 10, 11, 12 });
3481
3482 std::vector<int16_t> input1({ 1, 10, 3});
3483
3484 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
3485 1, 8, 3, 1, 10, 3 });
3486
3487 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3488 workloadFactory,
3489 memoryManager,
3490 shape0,
3491 input0,
3492 shape1,
3493 input1,
3494 shape0,
3495 output,
3496 1.0f,
3497 0);
3498}
3499
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003500namespace {
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003501LayerTestResult<float,4> MultiplicationTestHelper(
3502 armnn::IWorkloadFactory& workloadFactory,
3503 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3504 const unsigned int shape0[4],
3505 const std::vector<float> & values0,
3506 const unsigned int shape1[4],
3507 const std::vector<float> & values1,
3508 const unsigned int outShape[4],
3509 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00003510{
surmeh01bceff2f2018-03-29 16:29:27 +01003511 const size_t dimensionCount = 4;
3512 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
3513 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
3514 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00003515
surmeh01bceff2f2018-03-29 16:29:27 +01003516 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
3517 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00003518
3519 LayerTestResult<float,4> ret(outputTensorInfo);
3520
3521 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
3522 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
3523 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3524
3525 armnn::MultiplicationQueueDescriptor data;
3526 armnn::WorkloadInfo info;
3527 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
3528 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
3529 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3530
3531 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
3532
3533 inputHandle0->Allocate();
3534 inputHandle1->Allocate();
3535 outputHandle->Allocate();
3536
3537 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
3538 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3539
Derek Lambertif30f7d32019-04-09 10:25:02 +01003540 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003541 workload->Execute();
3542
3543 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
3544
surmeh01bceff2f2018-03-29 16:29:27 +01003545 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00003546 return ret;
3547}
surmeh01bceff2f2018-03-29 16:29:27 +01003548} // anonymous namespace
3549
3550
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003551LayerTestResult<float,4> MultiplicationTest(
3552 armnn::IWorkloadFactory& workloadFactory,
3553 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003554{
3555 const unsigned int width = 2;
3556 const unsigned int height = 2;
3557 const unsigned int channelCount = 2;
3558 const unsigned int batchSize = 2;
3559
3560 unsigned int shape[] = { batchSize, channelCount, height, width };
3561
3562 std::vector<float> input0({
3563 1, 1, 1, 1, 2, 2, 2, 2,
3564 3, 3, 3, 3, 4, 4, 4, 4 });
3565
3566 std::vector<float> input1({
3567 2, 2, 2, 2, 3, 3, 3, 3,
3568 4, 4, 4, 4, 5, 5, 5, 5 });
3569
3570 std::vector<float> output({
3571 2, 2, 2, 2, 6, 6, 6, 6,
3572 12, 12, 12, 12, 20, 20, 20, 20 });
3573
3574 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003575 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003576 shape,
3577 input0,
3578 shape,
3579 input1,
3580 shape,
3581 output);
3582}
3583
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003584LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
3585 armnn::IWorkloadFactory& workloadFactory,
3586 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003587{
3588 unsigned int shape0[] = { 1, 2, 2, 2 };
3589 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3590
3591 unsigned int shape1[] = { 1, 1, 1, 1 };
3592 std::vector<float> input1({ 2 });
3593
3594 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
3595
3596 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003597 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003598 shape0,
3599 input0,
3600 shape1,
3601 input1,
3602 shape0,
3603 output);
3604}
3605
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003606LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
3607 armnn::IWorkloadFactory& workloadFactory,
3608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003609{
3610 unsigned int shape0[] = { 1, 3, 3, 2 };
3611 std::vector<float> input0({
3612 1, 2, 3, 4, 5, 6,
3613 7, 8, 9, 10, 11, 12,
3614 13, 14, 15, 16, 17, 18});
3615
3616 unsigned int shape1[] = { 1, 1, 1, 2 };
3617 std::vector<float> input1({ 1, 2 });
3618
3619 std::vector<float> output({
3620 1, 4, 3, 8, 5, 12,
3621 7, 16, 9, 20, 11, 24,
3622 13, 28, 15, 32, 17, 36});
3623
3624 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003625 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003626 shape0,
3627 input0,
3628 shape1,
3629 input1,
3630 shape0,
3631 output);
3632}
telsoa014fcda012018-03-09 14:13:49 +00003633
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003634LayerTestResult<float,4> CompareMultiplicationTest(
3635 armnn::IWorkloadFactory& workloadFactory,
3636 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3637 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00003638{
3639 const unsigned int width = 16;
3640 const unsigned int height = 32;
3641 const unsigned int channelCount = 2;
3642 const unsigned int batchSize = 5;
3643
3644 armnn::TensorInfo inputTensorInfo0;
3645 armnn::TensorInfo inputTensorInfo1;
3646 armnn::TensorInfo outputTensorInfo;
3647
3648 constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
3649
3650 inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
3651 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
3652 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
3653
3654 LayerTestResult<float,4> comparisonResult(outputTensorInfo);
3655
3656 auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
3657 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
3658
3659 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
3660 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
3661 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3662
3663 std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
3664 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
3665 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
3666
3667 armnn::MultiplicationQueueDescriptor data;
3668 armnn::WorkloadInfo info;
3669 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
3670 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
3671 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3672
3673 armnn::MultiplicationQueueDescriptor refData = data;
3674 armnn::WorkloadInfo refInfo = info;
3675 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
3676 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
3677 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
3678
3679 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
3680 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
3681
3682 inputHandle0->Allocate();
3683 inputHandle1->Allocate();
3684 outputHandle->Allocate();
3685 inputHandle0Ref->Allocate();
3686 inputHandle1Ref->Allocate();
3687 outputHandleRef->Allocate();
3688
3689 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
3690 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3691 CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
3692 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
3693
Derek Lambertif30f7d32019-04-09 10:25:02 +01003694 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003695 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01003696 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003697 workloadRef->Execute();
telsoa014fcda012018-03-09 14:13:49 +00003698 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
3699 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
3700
3701 return comparisonResult;
3702}
3703
// Runs the same randomised BatchNormalization workload on both workloadFactory and
// refWorkloadFactory, returning both results so the caller can compare the backend
// under test against the reference implementation (ret.output vs ret.outputExpected).
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo; // Shape of the per-channel parameters (mean/variance/beta/gamma).

    // Input/output are laid out as { N, C, H, W }.
    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    // Random per-channel parameters; variance gets a 0.0f floor so it is never negative.
    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference workload copies the descriptor but swaps in its own tensor handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both workloads consume identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
3786
// Rearranges inputData according to 'mappings' by running a Permute workload on the
// given factory. On return, outputData holds the permuted elements and inputTensorInfo
// is overwritten in-place with the permuted tensor's info so the caller sees the new shape.
// NOTE(review): memoryManager appears unused in this helper - presumably kept for
// signature symmetry with the other test helpers; confirm before removing.
template<typename T>
void PermuteTensorData(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::PermutationVector& mappings,
    armnn::TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Report the permuted shape back to the caller.
    inputTensorInfo = outputTensorInfo;
}
3830
Jim Flynn825af452019-05-20 12:49:28 +01003831armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01003832 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3833 unsigned int concatDim)
3834{
telsoa014fcda012018-03-09 14:13:49 +00003835 std::vector<armnn::TensorShape> shapes;
3836 shapes.reserve(inputTensorInfos.size());
3837 for (const armnn::TensorInfo& it: inputTensorInfos)
3838 {
3839 shapes.push_back(it.GetShape());
3840 }
surmeh013537c2c2018-05-18 16:31:43 +01003841
Jim Flynn825af452019-05-20 12:49:28 +01003842 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
3843 shapes.end(),
3844 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01003845}
3846
//
// Concatenation is only supported for the N and C dimensions for NCHW, and for the innermost dimension.
// For tensors with fewer than 4 dimensions we need to make sure that the concat dimension is at least
// the 3rd slowest-iterating one, or the innermost dimension.
//
3852
// Returns true when the inputs must be permuted before (and un-permuted after)
// concatenation because the requested axis is not directly supported. Per the
// expression below: tensors with rank < 3 always need it, rank-3 tensors need it
// only for the middle axis (concatDim == 1), and rank-4 tensors never do.
bool NeedPermuteForConcat(
    const std::vector<armnn::TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
3878
3879armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
3880{
3881 unsigned int numDims = inputShape.GetNumDimensions();
3882 if (numDims >= 3)
3883 {
3884 // Nothing to do if the inputShape has at least 3 dimensions.
3885 return inputShape;
3886 }
3887
3888 std::vector<unsigned int> newDims(size_t(3), 1u);
3889 unsigned int expandedBy = 3 - numDims;
3890 for (unsigned int i=0; i<numDims; ++i)
3891 {
3892 newDims[expandedBy+i] = inputShape[i];
3893 }
3894 return armnn::TensorShape(3u, &newDims[0]);
3895}
3896
3897void Generate3dPermuteVectorForConcat(
3898 unsigned int numDimensions,
3899 unsigned int & concatDim,
3900 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
3901{
3902 BOOST_ASSERT_MSG(numDimensions <= 3,
3903 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01003904 unsigned int expandedBy = 3 - numDimensions;
3905 unsigned int expandedConcatAxis = concatDim + expandedBy;
3906
3907 if (expandedConcatAxis == 2)
3908 {
3909 concatDim = 0;
3910 armnn::PermutationVector forwardPermutation({1, 2, 0});
3911 armnn::PermutationVector reversePermutation({2, 0, 1});
3912 permutations = std::make_pair(forwardPermutation, reversePermutation);
3913 }
3914 else if (expandedConcatAxis == 1)
3915 {
3916 concatDim = 0;
3917 armnn::PermutationVector forwardPermutation({2, 0, 1});
3918 armnn::PermutationVector reversePermutation({1, 2, 0});
3919 permutations = std::make_pair(forwardPermutation, reversePermutation);
3920 }
3921 else
3922 {
3923 BOOST_ASSERT(expandedConcatAxis == 0);
3924 concatDim = 0;
3925 }
3926}
3927
3928//
3929// Permute the input tensors so we can do a supported concatenation.
3930// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
3931// at the front. Finally this function tells what the output shape
3932// of the permuted concatenated tensor is going to be.
3933//
// Permutes every input so that concatenation can take place along dimension 0,
// which is always supported. Lower-rank tensors are first expanded to 3D with
// dummy leading 1 dimensions. On return:
//  - inputTensorInfos / inputData describe and point at the permuted tensors
//    (permuted element storage is owned by inputDataStorage);
//  - permuteVector holds the REVERSE permutation needed to undo this later;
//  - concatDim is rewritten to the post-permutation concat axis (0);
//  - outputTensorInfo is rewritten to the permuted output shape.
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input: derive the permutation pair (and the new concatDim) once.
            // All inputs share the same rank, so the same pair applies to each.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        // Permute this input's data into inputDataStorage[nthInput]; the call also
        // updates newTensorInfo to the permuted shape.
        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
3996
3997
3998//
3999// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01004000// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01004001// output.
4002//
// Counterpart of PermuteInputsForConcat: reads the concatenated result out of
// inputDataHandle, applies 'permuteVector' (the reverse permutation recorded by
// PermuteInputsForConcat) and writes the restored-layout elements into 'data'.
template <typename T>
void PermuteOutputForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo & tensorInfo,
    const armnn::PermutationVector & permuteVector,
    std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
    T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    // Copy the un-permuted elements into the caller's buffer.
    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}
4036
4037template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004038void Concatenate(
4039 armnn::IWorkloadFactory& workloadFactory,
4040 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4041 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
4042 std::initializer_list<T *> inputsOrig,
4043 const armnn::TensorInfo& outputTensorInfoOrig,
4044 T * output,
narpra015cdda352018-11-19 15:30:27 +00004045 unsigned int concatDim,
4046 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01004047{
4048 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
4049 if (output == nullptr)
4050 {
4051 // Nullptr is an error in the test. By returning without doing the permutation
4052 // I expect the caller to fail the test. It still makes sense to report this as
4053 // an assert for Debug builds.
4054 return;
4055 }
4056
telsoa01c577f2c2018-08-31 09:22:23 +01004057 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01004058 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
4059 std::vector<T *> inputs = inputsOrig;
4060 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
4061
4062 armnn::PermutationVector permuteVector{0, 1, 2};
4063
telsoa01c577f2c2018-08-31 09:22:23 +01004064 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01004065 std::vector<std::vector<T>> tmpInputDataStorage;
4066
4067 const size_t inputCount = inputTensorInfos.size();
4068
4069 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
4070
4071 if (needPermuteForConcat)
4072 {
4073 //
4074 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01004075 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01004076 //
4077 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004078 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004079 inputTensorInfos,
4080 inputs,
4081 tmpInputDataStorage,
4082 permuteVector,
4083 concatDim,
4084 outputTensorInfo);
4085 }
4086
narpra015cdda352018-11-19 15:30:27 +00004087 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00004088
4089 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
4090 inputHandles.reserve(inputCount);
4091
narpra015cdda352018-11-19 15:30:27 +00004092 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4093
Jim Flynne242f2d2019-05-22 14:24:13 +01004094 armnn::ConcatQueueDescriptor queueDescriptor;
Jim Flynn825af452019-05-20 12:49:28 +01004095 armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
narpra015cdda352018-11-19 15:30:27 +00004096 queueDescriptor.m_Parameters = viewsDescriptor;
4097
4098 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004099 {
narpra015cdda352018-11-19 15:30:27 +00004100 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
4101 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
4102 {
4103 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
4104 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
4105 }
telsoa014fcda012018-03-09 14:13:49 +00004106
narpra015cdda352018-11-19 15:30:27 +00004107 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00004108
narpra015cdda352018-11-19 15:30:27 +00004109 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
4110 for (unsigned int i = 0; i < inputCount; ++i)
4111 {
4112 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
4113 std::unique_ptr<armnn::ITensorHandle> inputHandle =
4114 subTensorsSupported ?
4115 workloadFactory.CreateSubTensorHandle(*outputHandle,
4116 inputTensorInfo.GetShape(),
4117 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
4118 workloadFactory.CreateTensorHandle(inputTensorInfo);
4119
4120 inputHandles.emplace_back(std::move(inputHandle));
4121 }
4122
telsoa014fcda012018-03-09 14:13:49 +00004123 }
narpra015cdda352018-11-19 15:30:27 +00004124 else
4125 {
4126 for (unsigned int i = 0; i < inputCount; ++i)
4127 {
4128 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
4129 inputHandles.emplace_back(std::move(inputHandle));
4130 }
4131 }
telsoa014fcda012018-03-09 14:13:49 +00004132
4133 for (unsigned int i = 0; i < inputCount; ++i)
4134 {
surmeh013537c2c2018-05-18 16:31:43 +01004135 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00004136 }
4137
4138 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4139
Jim Flynn4ed6c832019-05-20 11:02:46 +01004140 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
telsoa014fcda012018-03-09 14:13:49 +00004141
4142 for (auto& inputHandle : inputHandles)
4143 {
4144 inputHandle->Allocate();
4145 }
4146
4147 outputHandle->Allocate();
4148
4149 unsigned int nextInputId = 0;
4150 for (auto& inputHandle : inputHandles)
4151 {
surmeh013537c2c2018-05-18 16:31:43 +01004152 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
4153 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00004154 }
4155
Derek Lambertif30f7d32019-04-09 10:25:02 +01004156 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00004157 workload->Execute();
4158
surmeh013537c2c2018-05-18 16:31:43 +01004159 if (needPermuteForConcat)
4160 {
4161 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004162 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004163 outputTensorInfo,
4164 permuteVector,
4165 std::move(outputHandle),
4166 output);
4167 }
4168 else
4169 {
4170 CopyDataFromITensorHandle(output, outputHandle.get());
4171 }
telsoa014fcda012018-03-09 14:13:49 +00004172}
4173
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004174template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004175LayerTestResult<T, 1> Concatenation1dTestImpl(
4176 armnn::IWorkloadFactory& workloadFactory,
4177 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4178 float qScale,
4179 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004180{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004181 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004182
4183 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
4184 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
4185 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
4186
Jim Flynncbb66aa2019-05-15 13:03:54 +01004187 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004188
4189 LayerTestResult<T, 1> result(outputTensorInfo);
4190
4191 std::vector<T> output;
4192 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004193 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004194 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4195 { input0.data(), input1.data(), input2.data() },
4196 outputTensorInfo,
4197 output.data(),
4198 0,
4199 true);
telsoa014fcda012018-03-09 14:13:49 +00004200
4201 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
4202 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4203 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
4204 }));
4205
4206 return result;
4207}
4208
// Float32 variant of the 1D concatenation test (no quantization: scale 0.0f, offset 0).
LayerTestResult<float, 1> Concatenation1dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
4215
// Shared implementation for the 2D concatenation tests: concatenates three 2x3
// input tensors along 'dimension' and returns the actual output in
// result.output. The caller is responsible for filling result.outputExpected.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}
4266
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004267template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004268LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
4269 armnn::IWorkloadFactory& workloadFactory,
4270 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4271 float qScale,
4272 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004273{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004274 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004275
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004276 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4277 workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
4278
telsoa014fcda012018-03-09 14:13:49 +00004279 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4280 // Batch 0
4281 1.0f, 2.0f, 3.0f,
4282
4283 // Batch 1
4284 10.0f, 11.0f, 12.0f,
4285
4286 // Batch 2
4287 4.0f, 5.0f, 6.0f,
4288
4289 // Batch 3
4290 13.0f, 14.0f, 15.0f,
4291
4292 // Batch 4
4293 7.0f, 8.0f, 9.0f,
4294
4295 // Batch 5
4296 16.0f, 17.0f, 18.0f,
4297 }));
4298
4299 return result;
4300}
4301
// Float32 variant: concatenate three 2x3 tensors along dimension 0 (no quantization).
LayerTestResult<float, 2> Concatenation2dDim0Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
4308
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004309template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004310LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
4311 armnn::IWorkloadFactory& workloadFactory,
4312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4313 float qScale,
4314 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004315{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004316 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004317
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004318 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4319 workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
4320
telsoa014fcda012018-03-09 14:13:49 +00004321 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4322 // Batch 0
4323 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
4324
4325 // Batch 1
4326 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
4327 }));
4328
4329 return result;
4330}
4331
// Float32 variant: concatenate three 2x3 tensors along dimension 1 (no quantization).
LayerTestResult<float, 2> Concatenation2dDim1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
4338
// Concatenates three inputs with DIFFERENT batch counts (2x3, 3x3 and 1x3)
// along dimension 0, producing a 6x3 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
4409
// Float32 variant: dimension-0 concatenation of inputs with differing batch counts.
LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}
4417
// Concatenates three inputs with DIFFERENT widths (2x3, 2x5 and 2x1) along
// dimension 1, producing a 2x9 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}
4476
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004477LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
4478 armnn::IWorkloadFactory& workloadFactory,
4479 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004480{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004481 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4482 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004483}
4484
// Shared driver for the 3D concatenation tests: builds three fixed [2, 3, 2]
// inputs, concatenates them along 'dimension' into a tensor described by
// 'outputTensorInfo', and returns the raw output. Callers are expected to
// fill in result.outputExpected themselves for the dimension they test.
// 'useSubtensor' lets backends exercise the sub-tensor optimisation path.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
4572
// Concatenates three [2, 3, 2] inputs along dimension 0 (batch), giving a
// [6, 3, 2] output where the inputs' batches appear back-to-back in input
// order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
4643
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004644LayerTestResult<float, 3> Concatenation3dDim0Test(
4645 armnn::IWorkloadFactory& workloadFactory,
4646 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004647{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004648 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004649}
4650
// Concatenates three [2, 3, 2] inputs along dimension 1 (channel), giving a
// [2, 9, 2] output where each batch holds the three inputs' channels in
// input order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
4721
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004722LayerTestResult<float, 3> Concatenation3dDim1Test(
4723 armnn::IWorkloadFactory& workloadFactory,
4724 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004725{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004726 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004727}
4728
// Concatenates three [2, 3, 2] inputs along dimension 2 (innermost), giving
// a [2, 3, 6] output where each row interleaves the three inputs' element
// pairs. The innermost-dimension case is where the sub-tensor path differs
// most, hence the explicit 'useSubtensor' switch.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
4764
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004765LayerTestResult<float, 3> Concatenation3dDim2Test(
4766 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4768 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004769{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004770 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
4771 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004772}
4773
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004774template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004775LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
4776 armnn::IWorkloadFactory& workloadFactory,
4777 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4778 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004779 int32_t qOffset)
4780{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004781 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004782 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4783 // Batch 0, Channel 0
4784 1.0f, 2.0f,
4785
4786 // Batch 0, Channel 1
4787 3.0f, 4.0f,
4788
4789 // Batch 0, Channel 2
4790 5.0f, 6.0f,
4791
4792 // Batch 1, Channel 0
4793 19.0f, 20.0f,
4794
4795 // Batch 1, Channel 1
4796 21.0f, 22.0f,
4797
4798 // Batch 1, Channel 2
4799 23.0f, 24.0f
4800 }));
4801
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004802 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004803 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4804 // Batch 0, Channel 0
4805 7.0f, 8.0f,
4806
4807 // Batch 0, Channel 1
4808 9.0f, 10.0f,
4809
4810 // Batch 0, Channel 2
4811 11.0f, 12.0f,
4812 }));
4813
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004814 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004815 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4816 // Batch 0, Channel 0
4817 25.0f, 26.0f,
4818
4819 // Batch 0, Channel 1
4820 27.0f, 28.0f,
4821
4822 // Batch 0, Channel 2
4823 29.0f, 30.0f,
4824
4825 // Batch 1, Channel 0
4826 13.0f, 14.0f,
4827
4828 // Batch 1, Channel 1
4829 15.0f, 16.0f,
4830
4831 // Batch 1, Channel 2
4832 17.0f, 18.0f,
4833
4834 // Batch 2, Channel 0
4835 31.0f, 32.0f,
4836
4837 // Batch 2, Channel 1
4838 33.0f, 34.0f,
4839
4840 // Batch 2, Channel 2
4841 35.0f, 36.0f
4842 }));
4843
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004844 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004845 LayerTestResult<T, 3> result(outputTensorInfo);
4846
4847 std::vector<T> output;
4848 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004849 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004850 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4851 { input0.data(), input1.data(), input2.data() },
4852 outputTensorInfo,
4853 output.data(),
4854 0,
4855 true);
telsoa014fcda012018-03-09 14:13:49 +00004856
4857 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4858 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4859 // Batch 0, Channel 0
4860 1.0f, 2.0f,
4861
4862 // Batch 0, Channel 1
4863 3.0f, 4.0f,
4864
4865 // Batch 0, Channel 2
4866 5.0f, 6.0f,
4867
4868 // Batch 1, Channel 0
4869 19.0f, 20.0f,
4870
4871 // Batch 1, Channel 1
4872 21.0f, 22.0f,
4873
4874 // Batch 1, Channel 2
4875 23.0f, 24.0f,
4876
4877 // Batch 2, Channel 0
4878 7.0f, 8.0f,
4879
4880 // Batch 2, Channel 1
4881 9.0f, 10.0f,
4882
4883 // Batch 2, Channel 2
4884 11.0f, 12.0f,
4885
4886 // Batch 3, Channel 0
4887 25.0f, 26.0f,
4888
4889 // Batch 3, Channel 1
4890 27.0f, 28.0f,
4891
4892 // Batch 3, Channel 2
4893 29.0f, 30.0f,
4894
4895 // Batch 4, Channel 0
4896 13.0f, 14.0f,
4897
4898 // Batch 4, Channel 1
4899 15.0f, 16.0f,
4900
4901 // Batch 4, Channel 2
4902 17.0f, 18.0f,
4903
4904 // Batch 5, Channel 0
4905 31.0f, 32.0f,
4906
4907 // Batch 5, Channel 1
4908 33.0f, 34.0f,
4909
4910 // Batch 5, Channel 2
4911 35.0f, 36.0f
4912 }));
4913
4914 return result;
4915}
4916
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004917LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
4918 armnn::IWorkloadFactory& workloadFactory,
4919 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004920{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004921 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4922 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004923}
4924
// Concatenates three 3D inputs with differing channel counts (3, 4 and 1)
// along dimension 1 and checks each batch of the [2, 8, 2] output holds the
// inputs' channels stacked in input order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
5055
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005056LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
5057 armnn::IWorkloadFactory& workloadFactory,
5058 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005059{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005060 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5061 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005062}
5063
// Concatenates three 3D inputs with differing innermost widths (2, 1 and 3)
// along dimension 2 and checks each [2, 3, 6] output row interleaves the
// inputs' elements in input order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
5171
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005172LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
5173 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005174 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5175 bool useSubtensor)
5176{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005177 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
5178 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005179}
5180
// Shared driver for the 4D concatenation tests: builds three fixed
// [1, 3, 2, 2] inputs, concatenates them along 'dimension' into a tensor
// described by 'outputTensorInfo', and returns the raw output. Callers fill
// in result.outputExpected for the dimension they test.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f
    }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}
5237
// Concatenates three [1, 3, 2, 2] inputs along dimension 0 (batch): the
// [3, 3, 2, 2] output is the inputs laid out one after another.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
5274
5275LayerTestResult<float, 4> Concatenation4dDim0Test(
5276 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005277 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005278{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005279 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005280}
5281
// Concatenates three [1, 3, 2, 2] inputs along dimension 1 (channel): the
// [1, 9, 2, 2] output stacks the inputs' channels in input order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5319
5320LayerTestResult<float, 4> Concatenation4dDim1Test(
5321 armnn::IWorkloadFactory& workloadFactory,
5322 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5323{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005324 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005325}
5326
// Concatenates three [1, 3, 2, 2] inputs along dimension 2 (height): each
// channel of the [1, 3, 6, 2] output holds the inputs' rows in input order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5364
5365LayerTestResult<float, 4> Concatenation4dDim2Test(
5366 armnn::IWorkloadFactory& workloadFactory,
5367 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5368{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005369 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005370}
5371
// Concatenates three [1, 3, 2, 2] inputs along dimension 3 (innermost):
// each row of the [1, 3, 2, 6] output interleaves the inputs' element
// pairs. The innermost dimension is where the sub-tensor path differs most,
// hence the explicit 'useSubtensor' switch.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
5410
5411LayerTestResult<float, 4> Concatenation4dDim3Test(
5412 armnn::IWorkloadFactory& workloadFactory,
5413 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5414 bool useSubtensor)
5415{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005416 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
5417 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00005418}
5419
// Concatenates a 1x3x2x2 tensor and a 2x3x2x2 tensor along dimension 0
// (batch), producing a 3x3x2x2 output. Exercises concatenation of inputs
// whose shapes differ only in the concatenation dimension.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Final argument requests the use of sub-tensors where the backend supports them.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output is simply input0's batch followed by input1's two batches.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5499
5500LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
5501 armnn::IWorkloadFactory& workloadFactory,
5502 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5503{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005504 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
5505 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005506}
5507
// Concatenates a 1x3x2x2 tensor and a 1x2x2x2 tensor along dimension 1
// (channels), producing a 1x5x2x2 output. Exercises concatenation of inputs
// whose shapes differ only in the concatenation dimension.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 1;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,

    }));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Final argument requests the use of sub-tensors where the backend supports them.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output is input0's three channels followed by input1's two channels.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f
    }));

    return result;
}
5568
5569LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
5570 armnn::IWorkloadFactory& workloadFactory,
5571 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5572{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005573 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
5574 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005575}
5576
// Concatenates a 1x3x2x2 tensor and a 1x3x3x2 tensor along dimension 2
// (height), producing a 1x3x5x2 output. Exercises concatenation of inputs
// whose shapes differ only in the concatenation dimension.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Final argument requests the use of sub-tensors where the backend supports them.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // For each channel, input0's two rows are followed by input1's three rows.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
5648
5649LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
5650 armnn::IWorkloadFactory& workloadFactory,
5651 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5652{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005653 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
5654 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005655}
5656
// Concatenates a 1x3x2x2 tensor and a 1x3x2x3 tensor along dimension 3
// (width), producing a 1x3x2x5 output. Exercises concatenation of inputs
// whose shapes differ only in the concatenation dimension; useSubtensor
// selects whether the backend is asked to use sub-tensors.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    unsigned int dimension = 3;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f, 13.0f,
        14.0f, 15.0f, 16.0f,

        17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f,

        23.0f, 24.0f, 25.0f,
        26.0f, 27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Each output row is input0's two values followed by input1's three values.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
    }));

    return result;
}
5717
5718LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
5719 armnn::IWorkloadFactory& workloadFactory,
5720 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5721 bool useSubtensor)
5722{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005723 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
5724 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005725}
5726
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005727LayerTestResult<float, 2> FakeQuantizationTest(
5728 armnn::IWorkloadFactory& workloadFactory,
5729 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005730{
5731 constexpr unsigned int width = 2;
5732 constexpr unsigned int height = 3;
5733
5734 const armnn::TensorInfo tensorInfo({height, width },
5735 armnn::DataType::Float32);
5736 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5737 -10.0f, -5.0f,
5738 0.0f, 5.0f,
5739 10.0f, 10.0f
5740 }));
5741
5742 LayerTestResult<float, 2> ret(tensorInfo);
5743
5744 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5745
5746 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5747
5748 armnn::FakeQuantizationQueueDescriptor data;
5749 armnn::WorkloadInfo info;
5750
5751 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
5752 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
5753 float min = -10.f;
5754 float max = 10.f;
5755
5756 data.m_Parameters.m_Min = min;
5757 data.m_Parameters.m_Max = max;
5758
5759 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
5760 armnn::FakeQuantizationQueueDescriptor refData = data;
5761 armnn::WorkloadInfo refInfo = info;
5762 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
5763
5764 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
5765
5766 inputHandle->Allocate();
5767 outputHandle->Allocate();
5768
5769 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
5770
Derek Lambertif30f7d32019-04-09 10:25:02 +01005771 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005772 workload->Execute();
5773
5774 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
5775
5776 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5777 0.0f, 63.0f,
5778 128.0f, 191.0f,
5779 255.0f, 255.0f
5780 }));
5781 return ret;
5782}
5783
namespace
{
// Shared driver for the L2Normalization layer tests.
//
// Builds an L2Normalization workload over a tensor of the given shape,
// feeds it inputValues (supplied in NCHW order) and compares the result
// against expectedOutputValues (also NCHW order). Input and output may use
// independent quantization parameters (scale/offset vs outScale/outOffset);
// epsilon is forwarded to the descriptor's m_Eps.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,
    int32_t offset,
    const std::vector<float>& inputValues,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // The reference data is authored in NCHW; permute it when the test runs in NHWC.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
                                        inputTensorInfo.GetQuantizationScale(),
                                        inputTensorInfo.GetQuantizationOffset(),
                                        inputData));

    // Apply the same NHWC permutation to the expected output.
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
                                             outputTensorInfo.GetQuantizationScale(),
                                             outputTensorInfo.GetQuantizationOffset(),
                                             expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

// Returns 1 / ||elements||_2, i.e. the factor each element is multiplied by
// under L2 normalization.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
                                            [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace
5867
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005868template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005869LayerTestResult<T, 2> Pad2dTestCommon(
5870 armnn::IWorkloadFactory& workloadFactory,
5871 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5872 float qScale,
David Monahan34757812019-06-19 11:47:21 +01005873 int32_t qOffset,
5874 const float customPaddingValue = 0)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005875{
Derek Lambertif30f7d32019-04-09 10:25:02 +01005876 const armnn::TensorShape inputShape{ 3, 3 };
5877 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005878
David Monahan34757812019-06-19 11:47:21 +01005879 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
5880 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005881
Derek Lambertif30f7d32019-04-09 10:25:02 +01005882 std::vector<T> inputValues(
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005883 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005884 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005885 // Height (3) x Width (3)
5886 4, 8, 6,
5887 7, 4, 4,
5888 3, 2, 4
5889 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005890
David Monahan34757812019-06-19 11:47:21 +01005891 const T padValue = ConvertToDataType<T>(customPaddingValue, inputTensorInfo);
5892
5893 std::vector<T> expectedOutputValues;
5894 if (padValue == 0)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005895 {
David Monahan34757812019-06-19 11:47:21 +01005896 expectedOutputValues = (
5897 QuantizedVector<T>(qScale, qOffset,
5898 {
5899 0, 0, 0, 0, 0, 0, 0,
5900 0, 0, 0, 0, 0, 0, 0,
5901 0, 0, 4, 8, 6, 0, 0,
5902 0, 0, 7, 4, 4, 0, 0,
5903 0, 0, 3, 2, 4, 0, 0,
5904 0, 0, 0, 0, 0, 0, 0,
5905 0, 0, 0, 0, 0, 0, 0
5906 }));
5907 }
5908 else
5909 {
5910 expectedOutputValues = (
5911 QuantizedVector<T>(qScale, qOffset,
5912 {
5913 1, 1, 1, 1, 1, 1, 1,
5914 1, 1, 1, 1, 1, 1, 1,
5915 1, 1, 4, 8, 6, 1, 1,
5916 1, 1, 7, 4, 4, 1, 1,
5917 1, 1, 3, 2, 4, 1, 1,
5918 1, 1, 1, 1, 1, 1, 1,
5919 1, 1, 1, 1, 1, 1, 1
5920 }));
5921 }
5922
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005923
Derek Lambertif30f7d32019-04-09 10:25:02 +01005924 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005925
Derek Lambertif30f7d32019-04-09 10:25:02 +01005926 LayerTestResult<T, 2> result(outputTensorInfo);
5927 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005928
Derek Lambertif30f7d32019-04-09 10:25:02 +01005929 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5930 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005931
Derek Lambertif30f7d32019-04-09 10:25:02 +01005932 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005933
Derek Lambertif30f7d32019-04-09 10:25:02 +01005934 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5935 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5936 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005937
Derek Lambertif30f7d32019-04-09 10:25:02 +01005938 descriptor.m_Parameters.m_PadList = PadList;
5939 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005940
Derek Lambertif30f7d32019-04-09 10:25:02 +01005941 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5942 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005943
Derek Lambertif30f7d32019-04-09 10:25:02 +01005944 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005945
Derek Lambertif30f7d32019-04-09 10:25:02 +01005946 inputHandle->Allocate();
5947 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005948
Derek Lambertif30f7d32019-04-09 10:25:02 +01005949 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005950
Derek Lambertif30f7d32019-04-09 10:25:02 +01005951 workload->PostAllocationConfigure();
5952 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005953
Derek Lambertif30f7d32019-04-09 10:25:02 +01005954 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005955
Derek Lambertif30f7d32019-04-09 10:25:02 +01005956 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005957}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005958
// Pads a 2x2x2 tensor asymmetrically (0/1 on channels, 2/1 on height, 2/2 on
// width) with zeros, producing a 3x5x6 output, and checks the result.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Pad3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 2 };
    const armnn::TensorShape outputShape{ 3, 5, 6 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Channel 0, Height (2) x Width (2)
        0, 4,
        2, 5,

        // Channel 1, Height (2) x Width (2)
        6, 1,
        5, 2
    }));

    // The original data ends up at rows 2-3, columns 2-3 of the first two
    // output channels; the trailing channel is entirely padding.
    std::vector<T> expectedOutputValues(
      QuantizedVector<T>(qScale,qOffset,
    {

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 4, 0, 0,
        0, 0, 2, 5, 0, 0,
        0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 6, 1, 0, 0,
        0, 0, 5, 2, 0, 0,
        0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0

    }));

    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (before, after) padding per dimension: channels, height, width.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006043
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006044template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006045LayerTestResult<T, 4> Pad4dTestCommon(
6046 armnn::IWorkloadFactory& workloadFactory,
6047 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6048 float qScale,
6049 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006050{
6051 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
6052 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
6053
David Monahan34757812019-06-19 11:47:21 +01006054 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6055 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006056
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006057 std::vector<T> inputValues(
6058 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006059 {
6060 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006061 0, 1,
6062 2, 3,
6063 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006064
6065 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006066 6, 7,
6067 8, 9,
6068 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006069
6070 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006071 12, 13,
6072 14, 15,
6073 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006074
6075 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006076 18, 19,
6077 20, 21,
6078 22, 23
6079 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006080
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006081 std::vector<T> expectedOutputValues(
6082 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006083 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006084 0, 0, 0, 0,
6085 0, 0, 0, 0,
6086 0, 0, 0, 0,
6087 0, 0, 0, 0,
6088 0, 0, 0, 0,
6089 0, 0, 0, 0,
6090 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006091
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006092 0, 0, 0, 0,
6093 0, 0, 0, 0,
6094 0, 0, 0, 0,
6095 0, 0, 0, 0,
6096 0, 0, 0, 0,
6097 0, 0, 0, 0,
6098 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006099
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006100 0, 0, 0, 0,
6101 0, 0, 0, 0,
6102 0, 0, 0, 0,
6103 0, 0, 0, 0,
6104 0, 0, 0, 0,
6105 0, 0, 0, 0,
6106 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006107
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006108 0, 0, 0, 0,
6109 0, 0, 0, 0,
6110 0, 0, 0, 0,
6111 0, 0, 0, 0,
6112 0, 0, 0, 0,
6113 0, 0, 0, 0,
6114 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006115
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006116 0, 0, 0, 0,
6117 0, 0, 0, 0,
6118 0, 0, 0, 0,
6119 0, 0, 0, 0,
6120 0, 0, 0, 0,
6121 0, 0, 0, 0,
6122 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006123
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006124 0, 0, 0, 0,
6125 0, 0, 0, 0,
6126 0, 0, 0, 0,
6127 0, 0, 0, 0,
6128 0, 0, 0, 0,
6129 0, 0, 0, 0,
6130 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006131
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006132 0, 0, 0, 0,
6133 0, 0, 0, 0,
6134 0, 0, 0, 0,
6135 0, 0, 0, 0,
6136 0, 0, 0, 0,
6137 0, 0, 0, 0,
6138 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006139
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006140 0, 0, 0, 0,
6141 0, 0, 0, 0,
6142 0, 0, 0, 0,
6143 0, 0, 1, 0,
6144 0, 2, 3, 0,
6145 0, 4, 5, 0,
6146 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006147
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006148 0, 0, 0, 0,
6149 0, 0, 0, 0,
6150 0, 0, 0, 0,
6151 0, 6, 7, 0,
6152 0, 8, 9, 0,
6153 0, 10, 11, 0,
6154 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006155
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006156 0, 0, 0, 0,
6157 0, 0, 0, 0,
6158 0, 0, 0, 0,
6159 0, 0, 0, 0,
6160 0, 0, 0, 0,
6161 0, 0, 0, 0,
6162 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006163
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006164 0, 0, 0, 0,
6165 0, 0, 0, 0,
6166 0, 0, 0, 0,
6167 0, 0, 0, 0,
6168 0, 0, 0, 0,
6169 0, 0, 0, 0,
6170 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006171
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006172 0, 0, 0, 0,
6173 0, 0, 0, 0,
6174 0, 0, 0, 0,
6175 0, 0, 0, 0,
6176 0, 0, 0, 0,
6177 0, 0, 0, 0,
6178 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006179
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006180 0, 0, 0, 0,
6181 0, 0, 0, 0,
6182 0, 0, 0, 0,
6183 0, 12, 13, 0,
6184 0, 14, 15, 0,
6185 0, 16, 17, 0,
6186 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006187
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006188 0, 0, 0, 0,
6189 0, 0, 0, 0,
6190 0, 0, 0, 0,
6191 0, 18, 19, 0,
6192 0, 20, 21, 0,
6193 0, 22, 23, 0,
6194 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006195
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006196 0, 0, 0, 0,
6197 0, 0, 0, 0,
6198 0, 0, 0, 0,
6199 0, 0, 0, 0,
6200 0, 0, 0, 0,
6201 0, 0, 0, 0,
6202 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006203
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006204 0, 0, 0, 0,
6205 0, 0, 0, 0,
6206 0, 0, 0, 0,
6207 0, 0, 0, 0,
6208 0, 0, 0, 0,
6209 0, 0, 0, 0,
6210 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006211
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006212 0, 0, 0, 0,
6213 0, 0, 0, 0,
6214 0, 0, 0, 0,
6215 0, 0, 0, 0,
6216 0, 0, 0, 0,
6217 0, 0, 0, 0,
6218 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006219
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006220 0, 0, 0, 0,
6221 0, 0, 0, 0,
6222 0, 0, 0, 0,
6223 0, 0, 0, 0,
6224 0, 0, 0, 0,
6225 0, 0, 0, 0,
6226 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006227
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006228 0, 0, 0, 0,
6229 0, 0, 0, 0,
6230 0, 0, 0, 0,
6231 0, 0, 0, 0,
6232 0, 0, 0, 0,
6233 0, 0, 0, 0,
6234 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006235
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006236 0, 0, 0, 0,
6237 0, 0, 0, 0,
6238 0, 0, 0, 0,
6239 0, 0, 0, 0,
6240 0, 0, 0, 0,
6241 0, 0, 0, 0,
6242 0, 0, 0, 0
6243 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006244
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006245 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006246
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006247 LayerTestResult<T, 4> result(outputTensorInfo);
6248 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006249
6250 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6251 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6252
6253 armnn::PadQueueDescriptor descriptor;
6254
6255 std::vector<std::pair<unsigned int, unsigned int>> PadList;
6256 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6257 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6258 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
6259 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6260
6261 descriptor.m_Parameters.m_PadList = PadList;
6262 armnn::WorkloadInfo info;
6263
6264 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6265 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6266
6267 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6268
6269 inputHandle->Allocate();
6270 outputHandle->Allocate();
6271
6272 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6273
Derek Lambertif30f7d32019-04-09 10:25:02 +01006274 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006275 workload->Execute();
6276
6277 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6278
6279 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006280}
6281
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006282LayerTestResult<uint8_t, 2> PadUint82dTest(
6283 armnn::IWorkloadFactory& workloadFactory,
6284 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006285{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006286 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006287}
6288
David Monahan34757812019-06-19 11:47:21 +01006289LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
6290 armnn::IWorkloadFactory& workloadFactory,
6291 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6292{
6293 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
6294}
6295
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006296LayerTestResult<uint8_t, 3> PadUint83dTest(
6297 armnn::IWorkloadFactory& workloadFactory,
6298 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006299{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006300 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006301}
6302
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006303LayerTestResult<uint8_t, 4> PadUint84dTest(
6304 armnn::IWorkloadFactory& workloadFactory,
6305 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006306{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006307 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006308}
6309
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006310LayerTestResult<float, 2> PadFloat322dTest(
6311 armnn::IWorkloadFactory& workloadFactory,
6312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006313{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006314 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006315}
6316
David Monahan34757812019-06-19 11:47:21 +01006317LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
6318 armnn::IWorkloadFactory& workloadFactory,
6319 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6320{
6321 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
6322}
6323
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006324LayerTestResult<float, 3> PadFloat323dTest(
6325 armnn::IWorkloadFactory& workloadFactory,
6326 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006327{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006328 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006329}
6330
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006331LayerTestResult<float, 4> PadFloat324dTest(
6332 armnn::IWorkloadFactory& workloadFactory,
6333 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006334{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006335 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006336}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006337
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006338template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguere52211e2019-06-17 12:23:52 +01006339LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
6340 armnn::IWorkloadFactory& workloadFactory,
6341 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6342 float scale,
6343 int32_t offset,
6344 float outScale,
6345 int32_t outOffset,
6346 const armnn::DataLayout layout,
6347 float epsilon)
6348{
6349 // Width: 1
6350 // Height: 1
6351 // Channels: 3
6352 // BatchSize: 1
6353 unsigned int numberOfBatches = 1;
6354 unsigned int numberOfChannels = 3;
6355 unsigned int height = 1;
6356 unsigned int width = 1;
6357
6358 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
6359 numberOfBatches, numberOfChannels, height, width, layout);
6360
6361 // 0.0000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
6362 std::vector<float> inputValues
6363 {
6364 // Batch 0, Channel 0, Height (1) x Width (1)
6365 0.00000001f,
6366
6367 // Batch 0, Channel 1, Height (1) x Width (1)
6368 0.00000002f,
6369
6370 // Batch 0, Channel 2, Height (1) x Width (1)
6371 0.00000003f,
6372 };
6373
6374 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
6375 std::vector<float> expectedOutputValues
6376 {
6377 // Batch 0, Channel 0, Height (1) x Width (1)
6378 0.00000001f * approxInvL2Norm,
6379 0.00000002f * approxInvL2Norm,
6380 0.00000003f * approxInvL2Norm,
6381 };
6382
6383 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6384 inputValues, outScale, outOffset, expectedOutputValues, layout,
6385 epsilon);
6386}
6387
6388
6389template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006390LayerTestResult<T, 4> L2Normalization1dTestCommon(
6391 armnn::IWorkloadFactory& workloadFactory,
6392 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006393 float scale,
6394 int32_t offset,
6395 float outScale,
6396 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006397 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006398{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006399 // Width: 1
6400 // Height: 1
6401 // Channels: 10
6402 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006403 unsigned int numberOfBatches = 1;
6404 unsigned int numberOfChannels = 10;
6405 unsigned int height = 1;
6406 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00006407
jimfly013aab7c32018-11-12 13:32:08 +00006408
Nina Drozdd41b2592018-11-19 13:03:36 +00006409 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006410 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006411 std::vector<float> inputValues
6412 {
6413 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006414 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00006415
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006416 // Batch 0, Channel 1, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006417 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00006418
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006419 // Batch 0, Channel 2, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006420 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00006421
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006422 // Batch 0, Channel 3, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006423 4.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006424
6425 // Batch 0, Channel 4, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006426 5.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006427
6428 // Batch 0, Channel 5, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006429 6.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006430
6431 // Batch 0, Channel 6, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006432 7.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006433
6434 // Batch 0, Channel 7, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006435 8.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006436
6437 // Batch 0, Channel 8, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006438 9.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006439
6440 // Batch 0, Channel 9, Height (1) x Width (1)
6441 10.0f
6442 };
telsoa014fcda012018-03-09 14:13:49 +00006443 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006444 std::vector<float> expectedOutputValues
6445 {
6446 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006447 1.0f * approxInvL2Norm,
6448 2.0f * approxInvL2Norm,
6449 3.0f * approxInvL2Norm,
6450 4.0f * approxInvL2Norm,
6451 5.0f * approxInvL2Norm,
6452 6.0f * approxInvL2Norm,
6453 7.0f * approxInvL2Norm,
6454 8.0f * approxInvL2Norm,
6455 9.0f * approxInvL2Norm,
telsoa014fcda012018-03-09 14:13:49 +00006456 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006457 };
telsoa014fcda012018-03-09 14:13:49 +00006458
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006459
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006460 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6461 inputValues, outScale, outOffset, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006462}
6463
Ferran Balaguere52211e2019-06-17 12:23:52 +01006464LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
6465 armnn::IWorkloadFactory& workloadFactory,
6466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6467 const armnn::DataLayout layout)
6468{
6469 // Dummy descriptor to get the default value of epsilon.
6470 armnn::L2NormalizationDescriptor descriptor;
6471
6472 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6473 layout, descriptor.m_Eps);
6474}
6475
6476LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
6477 armnn::IWorkloadFactory& workloadFactory,
6478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6479 const armnn::DataLayout layout)
6480{
6481 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6482 layout, 1e-9f);
6483}
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006484
6485LayerTestResult<float, 4> L2Normalization1dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006486 armnn::IWorkloadFactory& workloadFactory,
6487 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006488 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006489{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006490 return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006491}
6492
6493LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
6494 armnn::IWorkloadFactory& workloadFactory,
6495 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6496 const armnn::DataLayout layout)
6497{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006498 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006499 layout);
6500}
6501
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006502LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
6503 armnn::IWorkloadFactory& workloadFactory,
6504 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6505 const armnn::DataLayout layout)
6506{
6507 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6508 1.f/128, 128, layout);
6509}
6510
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006511template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6512LayerTestResult<T, 4> L2Normalization2dTestCommon(
6513 armnn::IWorkloadFactory& workloadFactory,
6514 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006515 float scale,
6516 int32_t offset,
6517 float outScale,
6518 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006519 const armnn::DataLayout layout)
6520{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006521 // Width: 5
6522 // Height: 1
6523 // Channels: 2
6524 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006525 unsigned int numberOfBatches = 1;
6526 unsigned int numberOfChannels = 2;
6527 unsigned int height = 1;
6528 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00006529
Nina Drozdd41b2592018-11-19 13:03:36 +00006530 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006531 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006532 std::vector<float> inputValues
6533 {
6534 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00006535 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00006536
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006537 // Batch 0, Channel 1, Height (1) x Width (5)
6538 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
6539 };
6540 std::vector<float> expectedOutputValues
6541 {
6542 // Batch 0, Channel 0, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006543 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6544 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6545 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6546 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
6547 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006548
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006549 // Batch 0, Channel 1, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006550 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6551 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6552 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6553 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006554 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006555 };
telsoa014fcda012018-03-09 14:13:49 +00006556
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006557 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6558 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006559}
telsoa014fcda012018-03-09 14:13:49 +00006560
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006561LayerTestResult<float, 4> L2Normalization2dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006562 armnn::IWorkloadFactory& workloadFactory,
6563 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006564 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006565{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006566 return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6567 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006568}
6569
6570LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
6571 armnn::IWorkloadFactory& workloadFactory,
6572 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6573 const armnn::DataLayout layout)
6574{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006575 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006576 layout);
6577}
6578
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006579LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
6580 armnn::IWorkloadFactory& workloadFactory,
6581 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6582 const armnn::DataLayout layout)
6583{
6584 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6585 1.f/128, 128, layout);
6586}
6587
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006588template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6589LayerTestResult<T, 4> L2Normalization3dTestCommon(
6590 armnn::IWorkloadFactory& workloadFactory,
6591 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006592 float scale,
6593 int32_t offset,
6594 float outScale,
6595 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006596 const armnn::DataLayout layout)
6597{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006598 // Width: 3
6599 // Height: 4
6600 // Channels: 2
6601 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006602 unsigned int numberOfBatches = 1;
6603 unsigned int numberOfChannels = 2;
6604 unsigned int height = 4;
6605 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006606
Nina Drozdd41b2592018-11-19 13:03:36 +00006607 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006608 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006609 std::vector<float> inputValues
6610 {
6611 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006612 119.0f, 21.0f, 150.0f,
6613 149.0f, 32.0f, 179.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006614 15.0f, 227.0f, 141.0f,
telsoa014fcda012018-03-09 14:13:49 +00006615 147.0f, 199.0f, 220.0f,
6616
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006617 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006618 110.0f, 140.0f, 73.0f,
6619 211.0f, 212.0f, 89.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006620 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006621 162.0f, 12.0f, 161.0f
6622 };
6623 std::vector<float> expectedOutputValues
6624 {
6625 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006626 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006627 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006628 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
6629 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006630 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006631 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006632 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006633 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6634 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6635 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6636 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
6637 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
6638
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006639 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006640 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
6641 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006642 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006643 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
6644 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006645 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
6646 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006647 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6648 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6649 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006650 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006651 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
6652 };
telsoa014fcda012018-03-09 14:13:49 +00006653
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006654 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6655 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006656}
telsoa014fcda012018-03-09 14:13:49 +00006657
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006658LayerTestResult<float, 4> L2Normalization3dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006659 armnn::IWorkloadFactory& workloadFactory,
6660 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006661 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006662{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006663 return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6664 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006665}
6666
6667LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
6668 armnn::IWorkloadFactory& workloadFactory,
6669 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6670 const armnn::DataLayout layout)
6671{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006672 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006673 layout);
6674}
6675
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006676LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
6677 armnn::IWorkloadFactory& workloadFactory,
6678 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6679 const armnn::DataLayout layout)
6680{
6681 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6682 1.f/128, 128, layout);
6683}
6684
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006685template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6686LayerTestResult<T, 4> L2Normalization4dTestCommon(
6687 armnn::IWorkloadFactory& workloadFactory,
6688 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006689 float scale,
6690 int32_t offset,
6691 float outScale,
6692 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006693 const armnn::DataLayout layout)
6694{
6695 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006696 // Height: 4
6697 // Channels: 3
6698 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00006699 unsigned int numberOfBatches = 2;
6700 unsigned int numberOfChannels = 3;
6701 unsigned int height = 4;
6702 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006703
Nina Drozdd41b2592018-11-19 13:03:36 +00006704 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006705 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006706 std::vector<float> inputValues
6707 {
6708 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006709 235.0f, 46.0f, 178.0f,
6710 100.0f, 123.0f, 19.0f,
6711 172.0f, 74.0f, 250.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006712 6.0f, 195.0f, 80.0f,
telsoa014fcda012018-03-09 14:13:49 +00006713
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006714 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006715 113.0f, 95.0f, 202.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006716 77.0f, 114.0f, 71.0f,
telsoa014fcda012018-03-09 14:13:49 +00006717 122.0f, 246.0f, 166.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006718 82.0f, 28.0f, 37.0f,
telsoa014fcda012018-03-09 14:13:49 +00006719
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006720 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006721 56.0f, 170.0f, 162.0f,
telsoa014fcda012018-03-09 14:13:49 +00006722 194.0f, 89.0f, 254.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006723 12.0f, 209.0f, 200.0f,
6724 1.0f, 64.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00006725
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006726 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006727 67.0f, 90.0f, 49.0f,
6728 7.0f, 163.0f, 18.0f,
6729 25.0f, 117.0f, 103.0f,
telsoa014fcda012018-03-09 14:13:49 +00006730 247.0f, 59.0f, 189.0f,
6731
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006732 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006733 239.0f, 104.0f, 199.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006734 17.0f, 124.0f, 153.0f,
telsoa014fcda012018-03-09 14:13:49 +00006735 222.0f, 217.0f, 75.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006736 32.0f, 126.0f, 21.0f,
telsoa014fcda012018-03-09 14:13:49 +00006737
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006738 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006739 97.0f, 145.0f, 215.0f,
telsoa014fcda012018-03-09 14:13:49 +00006740 115.0f, 116.0f, 238.0f,
6741 226.0f, 16.0f, 132.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006742 92.0f, 125.0f, 88.0f
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006743 };
6744 std::vector<float> expectedOutputValues
6745 {
6746 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006747 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006748 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006749 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6750 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
6751 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006752 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006753 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006754 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006755 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006756 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006757 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006758 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006759
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006760 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006761 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006762 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006763 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006764 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006765 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006766 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006767 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
6768 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6769 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006770 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6771 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6772 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006773
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006774 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006775 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006776 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
6777 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6778 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006779 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006780 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006781 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006782 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6783 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006784 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6785 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6786 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006787
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006788 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006789 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6790 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6791 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6792 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006793 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006794 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6795 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006796 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
6797 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6798 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006799 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006800 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
6801
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006802 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006803 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6804 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6805 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006806 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006807 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6808 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6809 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
6810 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006811 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6812 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006813 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006814 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006815
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006816 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006817 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006818 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6819 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6820 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
6821 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6822 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6823 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006824 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006825 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006826 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006827 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006828 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006829 };
telsoa014fcda012018-03-09 14:13:49 +00006830
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006831 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6832 inputValues, outScale, outOffset, expectedOutputValues, layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006833}
6834
6835LayerTestResult<float, 4> L2Normalization4dTest(
6836 armnn::IWorkloadFactory& workloadFactory,
6837 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6838 const armnn::DataLayout layout)
6839{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006840 return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6841 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006842}
6843
6844LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
6845 armnn::IWorkloadFactory& workloadFactory,
6846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6847 const armnn::DataLayout layout)
6848{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006849 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006850 layout);
telsoa014fcda012018-03-09 14:13:49 +00006851}
6852
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006853LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
6854 armnn::IWorkloadFactory& workloadFactory,
6855 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6856 const armnn::DataLayout layout)
6857{
6858 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6859 1.f/128, 128, layout);
6860}
6861
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    // Runs a Constant workload whose layer-owned tensor holds a fixed
    // 2x3x4x3 (N,C,H,W) pattern, and checks that the workload reproduces
    // that tensor verbatim on its output. qScale/qOffset are the
    // quantization parameters used when T is a quantized type.
    // NOTE: memoryManager is unused here; the parameter is kept for
    // signature parity with the other test helpers in this file.
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    // A Constant layer emits its stored tensor unchanged, so the output
    // dimensions mirror the input dimensions exactly.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      ArmnnType, qScale, qOffset);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       ArmnnType, qScale, qOffset);

    // Set quantization parameters if the requested type is a quantized type.
    // NOTE(review): the TensorInfo constructors above already received
    // qScale/qOffset, so this block re-applies the same values; redundant
    // but harmless.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input; // The constant layer must emit the stored tensor unchanged.

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Constant layers have no workload inputs: the data lives in a
    // ScopedCpuTensorHandle referenced from the queue descriptor instead.
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6957
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006958LayerTestResult<float, 4> ConstantTest(
6959 armnn::IWorkloadFactory& workloadFactory,
6960 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006961{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006962 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006963}
6964
Nina Drozd58ef2c62019-05-16 12:09:18 +01006965LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
6966 armnn::IWorkloadFactory& workloadFactory,
6967 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6968{
6969 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
6970}
6971
6972LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006973 armnn::IWorkloadFactory& workloadFactory,
6974 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006975{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006976 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006977}
6978
LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Concatenates two QAsymm8 tensors along the channel dimension where the
    // second input uses quantization parameters different from the output.
    // The output shares input1's parameters, so input1's bytes pass through
    // unchanged while input2's values must be requantized by the backend.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Quantized input1 tensor. Range [-3, 1]
    const float inputScale1 = 0.015686f;
    const int32_t inputOffset1 = 192;

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Quantized input2 tensor. Range [-1, 4]
    const float inputScale2 = 0.019608f;
    const int32_t inputOffset2 = 50;

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // Output has the same quantization parameters than input1,
    // so that only the requantization of input2 is required
    const float outputScale = 0.015686f;
    const int32_t outputOffset = 192;

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // The last channel block (176..197) is input2 requantized into the
    // output's scale/offset; the first two blocks are input1 verbatim.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        176, 177, 178,
        179, 181, 182,
        183, 184, 186,
        187, 188, 189,
        191, 192, 193,
        195, 196, 197,
    })
    );

    outputTensorInfo.SetQuantizationScale(outputScale);
    outputTensorInfo.SetQuantizationOffset(outputOffset);
    inputTensorInfo1.SetQuantizationScale(inputScale1);
    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
    inputTensorInfo2.SetQuantizationScale(inputScale2);
    inputTensorInfo2.SetQuantizationOffset(inputOffset2);

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // When the backend supports sub-tensors, the input handles are created as
    // views into the output tensor at the view origins; otherwise standalone
    // tensor handles are used and the workload copies the data.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
7121
LayerTestResult<uint8_t, 3> ConcatUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Concatenates a 2-channel and a 1-channel QAsymm8 tensor along the
    // channel dimension. All tensors share the same quantization parameters,
    // so the expected output is simply the raw bytes of both inputs stacked.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // When the backend supports sub-tensors, the input handles are views into
    // the output at the view origins; otherwise standalone handles are used.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
7257
LayerTestResult<uint16_t, 3> ConcatUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // QSymm16 variant of ConcatUint8Test: concatenates a 2-channel and a
    // 1-channel tensor along the channel dimension with identical
    // quantization parameters, so the output is the inputs stacked verbatim.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint16_t, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    }));

    auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // When the backend supports sub-tensors, the input handles are views into
    // the output at the view origins; otherwise standalone handles are used.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
telsoa014fcda012018-03-09 14:13:49 +00007390
surmeh01bceff2f2018-03-29 16:29:27 +01007391namespace
telsoa014fcda012018-03-09 14:13:49 +00007392{
template <typename T>
LayerTestResult<T, 4> AdditionQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Shared driver for quantized Addition tests: builds two quantized input
    // tensors, runs an Addition workload, and compares the raw quantized
    // output against outValues. Each tensor carries its own scale/offset.
    // The element type selects the ArmNN data type:
    // uint8_t -> QAsymm8, anything else (int16_t in practice) -> QSymm16.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::QuantisedSymm16);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
7459} // anonymous namespace
7460
LayerTestResult<uint8_t, 4> AdditionUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Element-wise addition of two QAsymm8 tensors that share quantization
    // parameters (scale 7, offset 3). The trailing comments give the
    // dequantized values, i.e. (q - 3) * 7; sums whose quantized result
    // would exceed 255 saturate to 255.
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<uint8_t> input0(
    {
        63,  35,  77,  70,  56, 112, //  420, 224,  518,  469,  371, 763
        203,  28, 252, 168, 245,  91 // 1400, 175, 1743, 1155, 1694, 616
    });

    std::vector<uint8_t> input1(
    {
        21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161,  63,  21, 105, 126 // 861, 1106,  420,  126,  714,  861
    });

    std::vector<uint8_t> output(
    {
        81,  39, 249, 255, 228, 255, //  546,  252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 3,
                                      shape1, input1, 7.0f, 3,
                                      shape0, output, 7.0f, 3);
}
7492
7493LayerTestResult<int16_t, 4> AdditionInt16Test(
7494 armnn::IWorkloadFactory& workloadFactory,
7495 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7496{
7497 const unsigned int shape0[] = { 1, 2, 2, 3 };
7498 const unsigned int shape1[] = { 1, 2, 2, 3 };
7499
7500 std::vector<int16_t> input0(
7501 {
7502 63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 184
7503 203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
7504 });
7505
7506 std::vector<int16_t> input1(
7507 {
7508 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
7509 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
7510 });
7511
7512 std::vector<int16_t> output(
7513 {
7514 84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107(clamped), 1617, 2254(clamped)
7515 329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
7516 });
7517
7518 return AdditionQuantizeTestHelper(workloadFactory,
7519 memoryManager,
7520 shape0, input0, 7.0f, 0,
7521 shape1, input1, 7.0f, 0,
7522 shape0, output, 7.0f, 0);
7523}
7524
7525namespace
7526{
7527template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7528LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
7529 armnn::IWorkloadFactory& workloadFactory,
7530 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7531 const unsigned int shape0[4],
7532 const std::vector<T> & values0,
7533 float scale0,
7534 int32_t offset0,
7535 const unsigned int shape1[4],
7536 const std::vector<T> & values1,
7537 float scale1,
7538 int32_t offset1,
7539 const unsigned int outShape[4],
7540 const std::vector<T> & outValues,
7541 float outScale,
7542 int32_t outOffset)
7543{
7544 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
7545 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
7546 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
7547
7548 inputTensorInfo0.SetQuantizationScale(scale0);
7549 inputTensorInfo0.SetQuantizationOffset(offset0);
7550
7551 inputTensorInfo1.SetQuantizationScale(scale1);
7552 inputTensorInfo1.SetQuantizationOffset(offset1);
7553
7554 outputTensorInfo.SetQuantizationScale(outScale);
7555 outputTensorInfo.SetQuantizationOffset(outOffset);
7556
7557 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
7558 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
7559
7560 LayerTestResult<T, 4> result(outputTensorInfo);
7561 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00007562
surmeh01bceff2f2018-03-29 16:29:27 +01007563 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
telsoa014fcda012018-03-09 14:13:49 +00007564 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
telsoa014fcda012018-03-09 14:13:49 +00007565 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7566
7567 armnn::MultiplicationQueueDescriptor data;
7568 armnn::WorkloadInfo info;
surmeh01bceff2f2018-03-29 16:29:27 +01007569 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
7570 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
telsoa014fcda012018-03-09 14:13:49 +00007571 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7572
7573 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
7574
surmeh01bceff2f2018-03-29 16:29:27 +01007575 inputHandle0->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007576 inputHandle1->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007577 outputHandle->Allocate();
7578
surmeh01bceff2f2018-03-29 16:29:27 +01007579 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007580 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007581
Derek Lambertif30f7d32019-04-09 10:25:02 +01007582 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007583 workload->Execute();
7584
7585 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7586
7587 return result;
7588}
surmeh01bceff2f2018-03-29 16:29:27 +01007589} // anonymous namespace
7590
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007591LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
7592 armnn::IWorkloadFactory& workloadFactory,
7593 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007594{
7595 unsigned int batchSize = 1;
7596 unsigned int channels = 2;
7597 unsigned int height = 2;
7598 unsigned int width = 3;
7599 const unsigned int shape[] = { batchSize, channels, height, width };
7600
telsoa01c577f2c2018-08-31 09:22:23 +01007601 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007602 std::vector<uint8_t> input0({
7603 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
7604 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
7605 });
7606
telsoa01c577f2c2018-08-31 09:22:23 +01007607 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007608 std::vector<uint8_t> input1({
7609 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
7610 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
7611 });
7612
telsoa01c577f2c2018-08-31 09:22:23 +01007613 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007614 std::vector<uint8_t> output(
7615 {
7616 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
7617 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
7618 });
7619
Sadik Armagan2999a022019-04-09 14:20:12 +01007620 // Scale/offset chosen to have output values out of range.
7621 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7622 memoryManager,
7623 shape,
7624 input0,
7625 4.0f,
7626 1,
7627 shape,
7628 input1,
7629 3.0f,
7630 -2,
7631 shape,
7632 output,
7633 1366.255f,
7634 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01007635}
7636
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007637LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
7638 armnn::IWorkloadFactory& workloadFactory,
7639 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007640{
7641 const unsigned int shape0[] = { 1, 2, 2, 3 };
7642 const unsigned int shape1[] = { 1, 1, 1, 1 };
7643
7644 std::vector<uint8_t> input0({
7645 1, 2, 3, 4, 5, 6,
7646 7, 8, 9, 10, 11, 12
7647 });
7648
7649 std::vector<uint8_t> input1({2});
7650
7651 std::vector<uint8_t> output({
7652 2, 4, 6, 8, 10, 12,
7653 14, 16, 18, 20, 22, 24
7654 });
7655
Sadik Armagan2999a022019-04-09 14:20:12 +01007656 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7657 memoryManager,
7658 shape0,
7659 input0,
7660 1.0f,
7661 0,
7662 shape1,
7663 input1,
7664 1.0f,
7665 0,
7666 shape0,
7667 output,
7668 1.0f,
7669 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007670}
7671
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007672LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7673 armnn::IWorkloadFactory& workloadFactory,
7674 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007675{
7676 const unsigned int shape0[] = { 1, 2, 2, 3 };
7677 const unsigned int shape1[] = { 1, 1, 1, 3 };
7678
7679 std::vector<uint8_t> input0({
7680 1, 2, 3, 4, 5, 6,
7681 7, 8, 9, 10, 11, 12
7682 });
7683
7684 std::vector<uint8_t> input1({1, 2, 3});
7685
7686 std::vector<uint8_t> output({
7687 1, 4, 9, 4, 10, 18,
7688 7, 16, 27, 10, 22, 36
7689 });
7690
Sadik Armagan2999a022019-04-09 14:20:12 +01007691 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7692 memoryManager,
7693 shape0,
7694 input0,
7695 1.0f,
7696 0,
7697 shape1,
7698 input1,
7699 1.0f,
7700 0,
7701 shape0,
7702 output,
7703 1.0f,
7704 0);
7705}
7706
7707LayerTestResult<int16_t, 4> MultiplicationInt16Test(
7708 armnn::IWorkloadFactory& workloadFactory,
7709 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7710{
7711 const unsigned int shape[] = { 1, 2, 2, 3 };
7712
7713 std::vector<int16_t> input0(
7714 {
7715 6, 7, 8, 9, 10, 11,
7716 12, 13, 14, 15, 16, 17
7717 });
7718
7719 std::vector<int16_t> input1(
7720 {
7721 1, 2, 3, 4, 5, 6,
7722 7, 8, 9, 10, 11, 12
7723 });
7724
7725 std::vector<int16_t> output(
7726 {
7727 6, 14, 24, 36, 50, 66,
7728 84, 104, 126, 150, 176, 204
7729 });
7730
7731 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7732 memoryManager,
7733 shape,
7734 input0,
7735 1.0f,
7736 0,
7737 shape,
7738 input1,
7739 1.0f,
7740 0,
7741 shape,
7742 output,
7743 1.0f,
7744 0);
7745}
7746
7747LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
7748 armnn::IWorkloadFactory& workloadFactory,
7749 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7750{
7751 const unsigned int shape0[] = { 1, 2, 2, 3 };
7752 const unsigned int shape1[] = { 1, 1, 1, 1 };
7753
7754 std::vector<int16_t> input0(
7755 {
7756 1, 2, 3, 4, 5, 6,
7757 7, 8, 9, 10, 11, 12
7758 });
7759
7760 std::vector<int16_t> input1({2});
7761
7762 std::vector<int16_t> output(
7763 {
7764 2, 4, 6, 8, 10, 12,
7765 14, 16, 18, 20, 22, 24
7766 });
7767
7768 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7769 memoryManager,
7770 shape0,
7771 input0,
7772 1.0f,
7773 0,
7774 shape1,
7775 input1,
7776 1.0f,
7777 0,
7778 shape0,
7779 output,
7780 1.0f,
7781 0);
7782}
7783
7784LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
7785 armnn::IWorkloadFactory& workloadFactory,
7786 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7787{
7788 const unsigned int shape0[] = { 1, 2, 2, 3 };
7789 const unsigned int shape1[] = { 1, 1, 1, 3 };
7790
7791 std::vector<int16_t> input0(
7792 {
7793 1, 2, 3, 4, 5, 6,
7794 7, 8, 9, 10, 11, 12
7795 });
7796
7797 std::vector<int16_t> input1({1, 2, 3});
7798
7799 std::vector<int16_t> output(
7800 {
7801 1, 4, 9, 4, 10, 18,
7802 7, 16, 27, 10, 22, 36
7803 });
7804
7805 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7806 memoryManager,
7807 shape0,
7808 input0,
7809 1.0f,
7810 0,
7811 shape1,
7812 input1,
7813 1.0f,
7814 0,
7815 shape0,
7816 output,
7817 1.0f,
7818 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007819}
telsoa014fcda012018-03-09 14:13:49 +00007820
David Beckf195f032018-09-06 16:46:34 +01007821namespace
7822{
Sadik Armagan2999a022019-04-09 14:20:12 +01007823template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007824LayerTestResult<T, 4> SubtractionTestHelper(
7825 armnn::IWorkloadFactory& workloadFactory,
7826 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7827 const unsigned int shape0[4],
7828 const std::vector<T>& values0,
7829 float scale0,
7830 int32_t offset0,
7831 const unsigned int shape1[4],
7832 const std::vector<T> & values1,
7833 float scale1,
7834 int32_t offset1,
7835 const unsigned int outShape[4],
7836 const std::vector<T> & outValues,
7837 float outScale,
7838 int32_t outOffset)
David Beckf195f032018-09-06 16:46:34 +01007839{
Sadik Armagan2999a022019-04-09 14:20:12 +01007840 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
7841 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
7842 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
David Beckf195f032018-09-06 16:46:34 +01007843
7844 inputTensorInfo0.SetQuantizationScale(scale0);
7845 inputTensorInfo0.SetQuantizationOffset(offset0);
7846
7847 inputTensorInfo1.SetQuantizationScale(scale1);
7848 inputTensorInfo1.SetQuantizationOffset(offset1);
7849
7850 outputTensorInfo.SetQuantizationScale(outScale);
7851 outputTensorInfo.SetQuantizationOffset(outOffset);
7852
7853 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
7854 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
7855
7856 LayerTestResult<T, 4> result(outputTensorInfo);
7857 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
7858
7859 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
7860 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
7861 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7862
7863 armnn::SubtractionQueueDescriptor data;
7864 armnn::WorkloadInfo info;
7865 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
7866 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7867 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7868
7869 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
7870
7871 inputHandle0->Allocate();
7872 inputHandle1->Allocate();
7873 outputHandle->Allocate();
7874
7875 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
7876 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
7877
Derek Lambertif30f7d32019-04-09 10:25:02 +01007878 workload->PostAllocationConfigure();
David Beckf195f032018-09-06 16:46:34 +01007879 workload->Execute();
7880
7881 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7882
7883 return result;
7884}
7885} // anonymous namespace
7886
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007887LayerTestResult<uint8_t, 4> SubtractionUint8Test(
7888 armnn::IWorkloadFactory& workloadFactory,
7889 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007890{
7891 const unsigned int shape0[] = { 1, 1, 2, 2 };
7892 const unsigned int shape1[] = { 1, 1, 2, 2 };
7893
7894 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7895 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
7896 std::vector<uint8_t> output({ 3, 3, 5, 5 });
7897
Sadik Armagan2999a022019-04-09 14:20:12 +01007898 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7899 memoryManager,
7900 shape0, input0, 0.5f, 2,
7901 shape1, input1, 1.0f, 0,
7902 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007903}
7904
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007905LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
7906 armnn::IWorkloadFactory& workloadFactory,
7907 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007908{
7909 const unsigned int shape0[] = { 1, 1, 2, 2 };
7910 const unsigned int shape1[] = { 1, 1, 1, 1 };
7911
7912 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7913 std::vector<uint8_t> input1({ 2 });
7914 std::vector<uint8_t> output({ 5, 6, 7, 8 });
7915
Sadik Armagan2999a022019-04-09 14:20:12 +01007916 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7917 memoryManager,
7918 shape0, input0, 0.5f, 2,
7919 shape1, input1, 1.0f, 0,
7920 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01007921}
7922
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007923LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
7924 armnn::IWorkloadFactory& workloadFactory,
7925 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007926{
7927 const unsigned int shape0[] = { 1, 1, 2, 2 };
7928 const unsigned int shape1[] = { 1, 1, 2, 1 };
7929
7930 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7931 std::vector<uint8_t> input1({ 2, 1 });
7932 std::vector<uint8_t> output({ 8, 11, 12, 15 });
7933
Sadik Armagan2999a022019-04-09 14:20:12 +01007934 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7935 memoryManager,
7936 shape0, input0, 1.0f, 0,
7937 shape1, input1, 1.0f, 0,
7938 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007939}
7940
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007941LayerTestResult<float, 4> SubtractionTest(
7942 armnn::IWorkloadFactory& workloadFactory,
7943 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007944{
7945 const unsigned int shape0[] = { 1, 1, 2, 2 };
7946 const unsigned int shape1[] = { 1, 1, 2, 2 };
7947
7948 std::vector<float> input0({ 1, 2, 3, 4 });
7949 std::vector<float> input1({ 1, -1, 0, 2 });
7950 std::vector<float> output({ 0, 3, 3, 2 });
7951
Sadik Armagan2999a022019-04-09 14:20:12 +01007952 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7953 memoryManager,
7954 shape0, input0, 1.0f, 0,
7955 shape1, input1, 1.0f, 0,
7956 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007957}
7958
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007959LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
7960 armnn::IWorkloadFactory& workloadFactory,
7961 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007962{
7963 const unsigned int shape0[] = { 1, 1, 2, 2 };
7964 const unsigned int shape1[] = { 1, 1, 1, 1 };
7965
7966 std::vector<float> input0({ 1, 2, 3, 4 });
7967 std::vector<float> input1({ 10 });
7968 std::vector<float> output({ -9, -8, -7, -6 });
7969
Sadik Armagan2999a022019-04-09 14:20:12 +01007970 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7971 memoryManager,
7972 shape0, input0, 1.0f, 0,
7973 shape1, input1, 1.0f, 0,
7974 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007975}
7976
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007977LayerTestResult<float, 4> SubtractionBroadcastTest(
7978 armnn::IWorkloadFactory& workloadFactory,
7979 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007980{
7981 const unsigned int shape0[] = { 1, 1, 2, 2 };
7982 const unsigned int shape1[] = { 1, 1, 1, 2 };
7983
7984 std::vector<float> input0({ 1, 2, 3, 4 });
7985 std::vector<float> input1({ 10, -5 });
7986 std::vector<float> output({ -9, 7, -7, 9 });
7987
Sadik Armagan2999a022019-04-09 14:20:12 +01007988 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7989 memoryManager,
7990 shape0, input0, 1.0f, 0,
7991 shape1, input1, 1.0f, 0,
7992 shape0, output, 1.0f, 0);
7993}
7994
7995LayerTestResult<int16_t, 4> SubtractionInt16Test(
7996 armnn::IWorkloadFactory& workloadFactory,
7997 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7998{
7999 const unsigned int shape0[] = { 1, 1, 2, 2 };
8000 const unsigned int shape1[] = { 1, 1, 2, 2 };
8001
8002 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8003 std::vector<int16_t> input1({ 1, 2, 1, 2 });
8004 std::vector<int16_t> output({ 3, 3, 5, 5 });
8005
8006 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8007 memoryManager,
8008 shape0, input0, 0.5f, 0,
8009 shape1, input1, 1.0f, 0,
8010 shape0, output, 1.0f, 0);
8011}
8012
8013LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
8014 armnn::IWorkloadFactory& workloadFactory,
8015 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8016{
8017 const unsigned int shape0[] = { 1, 1, 2, 2 };
8018 const unsigned int shape1[] = { 1, 1, 1, 1 };
8019
8020 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8021 std::vector<int16_t> input1({ 2 });
8022 std::vector<int16_t> output({ 3, 4, 5, 6 });
8023
8024 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8025 memoryManager,
8026 shape0, input0, 0.5f, 0,
8027 shape1, input1, 1.0f, 0,
8028 shape0, output, 1.0f, 0);
8029}
8030
8031LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
8032 armnn::IWorkloadFactory& workloadFactory,
8033 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8034{
8035 const unsigned int shape0[] = { 1, 1, 2, 2 };
8036 const unsigned int shape1[] = { 1, 1, 2, 1 };
8037
8038 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8039 std::vector<int16_t> input1({ 2, 1 });
8040 std::vector<int16_t> output({ 8, 11, 12, 15 });
8041
8042 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8043 memoryManager,
8044 shape0, input0, 1.0f, 0,
8045 shape1, input1, 1.0f, 0,
8046 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008047}
8048
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008049LayerTestResult<float, 4> BatchNormTest(
8050 armnn::IWorkloadFactory& workloadFactory,
8051 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008052{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008053 // BatchSize: 1
8054 // Channels: 2
8055 // Height: 3
8056 // Width: 2
8057
8058 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8059 std::vector<float> inputValues
8060 {
8061 // Batch 0, Channel 0, Height (3) x Width (2)
8062 1.f, 4.f,
8063 4.f, 2.f,
8064 1.f, 6.f,
8065
8066 // Batch 0, Channel 1, Height (3) x Width (2)
8067 1.f, 1.f,
8068 4.f, 1.f,
8069 -2.f, 4.f
8070 };
8071 std::vector<float> expectedOutputValues
8072 {
8073 // Batch 0, Channel 0, Height (3) x Width (2)
8074 1.f, 4.f,
8075 4.f, 2.f,
8076 1.f, 6.f,
8077
8078 // Batch 0, Channel 1, Height (3) x Width (2)
8079 3.f, 3.f,
8080 4.f, 3.f,
8081 2.f, 4.f
8082 };
8083
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008084 return BatchNormTestImpl<armnn::DataType::Float32>(
8085 workloadFactory, memoryManager,
8086 inputOutputShape, inputValues, expectedOutputValues,
8087 0.f, 0, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008088}
8089
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008090LayerTestResult<float, 4> BatchNormNhwcTest(
8091 armnn::IWorkloadFactory& workloadFactory,
8092 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008093{
8094 // BatchSize: 1
8095 // Height: 3
8096 // Width: 2
8097 // Channels: 2
8098
8099 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8100 std::vector<float> inputValues
8101 {
8102 // Batch 0, Height 0, Width (2) x Channel (2)
8103 1.f, 1.f,
8104 4.f, 1.f,
8105
8106 // Batch 0, Height 1, Width (2) x Channel (2)
8107 4.f, 4.f,
8108 2.f, 1.f,
8109
8110 // Batch 0, Height 2, Width (2) x Channel (2)
8111 1.f, -2.f,
8112 6.f, 4.f
8113 };
8114 std::vector<float> expectedOutputValues
8115 {
8116 // Batch 0, Height 0, Width (2) x Channel (2)
8117 1.f, 3.f,
8118 4.f, 3.f,
8119
8120 // Batch 0, Height 1, Width (2) x Channel (2)
8121 4.f, 4.f,
8122 2.f, 3.f,
8123
8124 // Batch 0, Height 2, Width (2) x Channel (2)
8125 1.f, 2.f,
8126 6.f, 4.f
8127 };
8128
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008129 return BatchNormTestImpl<armnn::DataType::Float32>(
8130 workloadFactory, memoryManager,
8131 inputOutputShape, inputValues, expectedOutputValues,
8132 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008133}
8134
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008135LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8136 armnn::IWorkloadFactory& workloadFactory,
8137 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008138{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008139 // BatchSize: 1
8140 // Channels: 2
8141 // Height: 3
8142 // Width: 2
8143
8144 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8145 std::vector<float> inputValues
8146 {
8147 // Batch 0, Channel 0, Height (3) x Width (2)
8148 1.f, 4.f,
8149 4.f, 2.f,
8150 1.f, 6.f,
8151
8152 // Batch 0, Channel 1, Height (3) x Width (2)
8153 1.f, 1.f,
8154 4.f, 1.f,
8155 -2.f, 4.f
8156 };
8157 std::vector<float> expectedOutputValues
8158 {
8159 // Batch 0, Channel 0, Height (3) x Width (2)
8160 1.f, 4.f,
8161 4.f, 2.f,
8162 1.f, 6.f,
8163
8164 // Batch 0, Channel 1, Height (3) x Width (2)
8165 3.f, 3.f,
8166 4.f, 3.f,
8167 2.f, 4.f
8168 };
8169
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008170 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8171 workloadFactory, memoryManager,
8172 inputOutputShape, inputValues, expectedOutputValues,
8173 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008174}
8175
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008176LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
8177 armnn::IWorkloadFactory& workloadFactory,
8178 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008179{
8180 // BatchSize: 1
8181 // Height: 3
8182 // Width: 2
8183 // Channels: 2
8184
8185 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8186 std::vector<float> inputValues
8187 {
8188 // Batch 0, Height 0, Width (2) x Channel (2)
8189 1.f, 1.f,
8190 4.f, 1.f,
8191
8192 // Batch 0, Height 1, Width (2) x Channel (2)
8193 4.f, 4.f,
8194 2.f, 1.f,
8195
8196 // Batch 0, Height 2, Width (2) x Channel (2)
8197 1.f, -2.f,
8198 6.f, 4.f
8199 };
8200 std::vector<float> expectedOutputValues
8201 {
8202 // Batch 0, Height 0, Width (2) x Channel (2)
8203 1.f, 3.f,
8204 4.f, 3.f,
8205
8206 // Batch 0, Height 1, Width (2) x Channel (2)
8207 4.f, 4.f,
8208 2.f, 3.f,
8209
8210 // Batch 0, Height 2, Width (2) x Channel (2)
8211 1.f, 2.f,
8212 6.f, 4.f
8213 };
8214
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008215 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
8216 (workloadFactory, memoryManager,
8217 inputOutputShape, inputValues, expectedOutputValues,
8218 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008219}
8220
Matteo Martincighf5507132019-06-04 10:59:47 +01008221LayerTestResult<int16_t, 4> BatchNormInt16Test(
8222 armnn::IWorkloadFactory& workloadFactory,
8223 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8224{
8225 // BatchSize: 1
8226 // Channels: 2
8227 // Height: 3
8228 // Width: 2
8229
8230 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8231 std::vector<float> inputValues
8232 {
8233 // Batch 0, Channel 0, Height (3) x Width (2)
8234 1.f, 4.f,
8235 4.f, 2.f,
8236 1.f, 6.f,
8237
8238 // Batch 0, Channel 1, Height (3) x Width (2)
8239 1.f, 1.f,
8240 4.f, 1.f,
8241 -2.f, 4.f
8242 };
8243 std::vector<float> expectedOutputValues
8244 {
8245 // Batch 0, Channel 0, Height (3) x Width (2)
8246 1.f, 4.f,
8247 4.f, 2.f,
8248 1.f, 6.f,
8249
8250 // Batch 0, Channel 1, Height (3) x Width (2)
8251 3.f, 3.f,
8252 4.f, 3.f,
8253 2.f, 4.f
8254 };
8255
8256 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
8257 workloadFactory, memoryManager,
8258 inputOutputShape, inputValues, expectedOutputValues,
8259 1.f/20.f, 50, armnn::DataLayout::NCHW);
8260}
8261
8262LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
8263 armnn::IWorkloadFactory& workloadFactory,
8264 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8265{
8266 // BatchSize: 1
8267 // Height: 3
8268 // Width: 2
8269 // Channels: 2
8270
8271 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8272 std::vector<float> inputValues
8273 {
8274 // Batch 0, Height 0, Width (2) x Channel (2)
8275 1.f, 1.f,
8276 4.f, 1.f,
8277
8278 // Batch 0, Height 1, Width (2) x Channel (2)
8279 4.f, 4.f,
8280 2.f, 1.f,
8281
8282 // Batch 0, Height 2, Width (2) x Channel (2)
8283 1.f, -2.f,
8284 6.f, 4.f
8285 };
8286 std::vector<float> expectedOutputValues
8287 {
8288 // Batch 0, Height 0, Width (2) x Channel (2)
8289 1.f, 3.f,
8290 4.f, 3.f,
8291
8292 // Batch 0, Height 1, Width (2) x Channel (2)
8293 4.f, 4.f,
8294 2.f, 3.f,
8295
8296 // Batch 0, Height 2, Width (2) x Channel (2)
8297 1.f, 2.f,
8298 6.f, 4.f
8299 };
8300
8301 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
8302 (workloadFactory, memoryManager,
8303 inputOutputShape, inputValues, expectedOutputValues,
8304 1.f/20.f, 50, armnn::DataLayout::NHWC);
8305}
8306
Nina Drozd58ef2c62019-05-16 12:09:18 +01008307LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008308 armnn::IWorkloadFactory& workloadFactory,
8309 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008310{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008311 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008312}
8313
Nina Drozd58ef2c62019-05-16 12:09:18 +01008314LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8315 armnn::IWorkloadFactory& workloadFactory,
8316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8317{
8318 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8319}
8320
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008321LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8322 armnn::IWorkloadFactory& workloadFactory,
8323 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008324{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008325 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008326}
8327
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008328LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8329 armnn::IWorkloadFactory& workloadFactory,
8330 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008331{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008332 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008333}
8334
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008335LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8336 armnn::IWorkloadFactory& workloadFactory,
8337 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008338{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008339 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008340}
8341
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008342LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
8343 armnn::IWorkloadFactory& workloadFactory,
8344 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008345{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008346 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8347 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008348}
8349
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008350LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
8351 armnn::IWorkloadFactory& workloadFactory,
8352 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008353{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008354 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8355 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008356}
8357
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008358LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
8359 armnn::IWorkloadFactory& workloadFactory,
8360 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008361{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008362 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008363}
8364
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008365LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
8366 armnn::IWorkloadFactory& workloadFactory,
8367 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008368{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008369 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008370}
8371
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008372LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
8373 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008374 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8375 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00008376{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008377 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8378 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008379}
8380
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008381LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8382 armnn::IWorkloadFactory& workloadFactory,
8383 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008384{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008385 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008386}
8387
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008388LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
8389 armnn::IWorkloadFactory& workloadFactory,
8390 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008391{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008392 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8393 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008394}
8395
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008396LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
8397 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008398 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8399 bool useSubtensor)
8400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008401 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8402 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008403}
8404
8405LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
8406 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008408{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008409 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008410}
8411
8412LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
8413 armnn::IWorkloadFactory& workloadFactory,
8414 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8415{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008416 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008417}
8418
8419LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
8420 armnn::IWorkloadFactory& workloadFactory,
8421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8422{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008423 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008424}
8425
8426LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
8427 armnn::IWorkloadFactory& workloadFactory,
8428 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
8429{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008430 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8431 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00008432}
8433
8434LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
8435 armnn::IWorkloadFactory& workloadFactory,
8436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8437{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008438 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
8439 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008440}
8441
8442LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
8443 armnn::IWorkloadFactory& workloadFactory,
8444 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8445{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008446 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
8447 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008448}
8449
8450LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
8451 armnn::IWorkloadFactory& workloadFactory,
8452 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8453{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008454 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8455 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008456}
8457
8458LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
8459 armnn::IWorkloadFactory& workloadFactory,
8460 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8461 bool useSubtensor)
8462{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008463 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8464 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00008465}
8466
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008467LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
8468 armnn::IWorkloadFactory& workloadFactory,
8469 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8470 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008471{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008472 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
8473 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008474}
8475
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008476LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
8477 armnn::IWorkloadFactory& workloadFactory,
8478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8479 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008480{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008481 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008482 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008483}
8484
Teresa Charlin0434df62019-06-06 13:40:35 +01008485LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
8486 armnn::IWorkloadFactory& workloadFactory,
8487 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8488 bool forceNoPadding)
8489{
8490 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
8491 workloadFactory, memoryManager, forceNoPadding);
8492}
8493
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008494LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
8495 armnn::IWorkloadFactory& workloadFactory,
8496 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8497 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008498{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008499 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
8500 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008501}
8502
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008503LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
8504 armnn::IWorkloadFactory& workloadFactory,
8505 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8506 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008507{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008508 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008509 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008510}
8511
Teresa Charlin0434df62019-06-06 13:40:35 +01008512LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
8513 armnn::IWorkloadFactory& workloadFactory,
8514 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8515 bool forceNoPadding)
8516{
8517 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
8518 workloadFactory, memoryManager, forceNoPadding);
8519}
8520
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008521LayerTestResult<float, 4> SimpleMaxPooling2dTest(
8522 armnn::IWorkloadFactory& workloadFactory,
8523 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008524 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008525{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008526 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008527}
8528
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008529LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
8530 armnn::IWorkloadFactory& workloadFactory,
8531 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008532 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01008533{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008534 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01008535}
8536
Teresa Charlin0434df62019-06-06 13:40:35 +01008537LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
8538 armnn::IWorkloadFactory& workloadFactory,
8539 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8540 const armnn::DataLayout dataLayout)
8541{
8542 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
8543}
8544LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
8545 armnn::IWorkloadFactory& workloadFactory,
8546 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8547{
8548 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8549}
8550
8551LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
8552 armnn::IWorkloadFactory& workloadFactory,
8553 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8554{
8555 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8556 workloadFactory, memoryManager, 1.0f, -5);
8557}
8558
8559LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
8560 armnn::IWorkloadFactory& workloadFactory,
8561 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8562{
8563 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8564 workloadFactory, memoryManager);
8565}
8566
8567LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
8568 armnn::IWorkloadFactory& workloadFactory,
8569 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8570{
8571 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8572}
8573
8574LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
8575 armnn::IWorkloadFactory& workloadFactory,
8576 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8577{
8578 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8579 workloadFactory, memoryManager, 1.0f, -5);
8580}
8581
8582LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
8583 armnn::IWorkloadFactory& workloadFactory,
8584 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8585{
8586 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
8587 workloadFactory, memoryManager);
8588}
8589
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008590LayerTestResult<float, 4> SimpleAveragePooling2dTest(
8591 armnn::IWorkloadFactory& workloadFactory,
8592 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008593 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008594{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008595 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01008596}
8597
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008598LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
8599 armnn::IWorkloadFactory& workloadFactory,
8600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008601 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01008602{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008603 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008604 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008605}
8606
Teresa Charlin0434df62019-06-06 13:40:35 +01008607LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
8608 armnn::IWorkloadFactory& workloadFactory,
8609 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8610 const armnn::DataLayout dataLayout)
8611{
8612 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8613 workloadFactory, memoryManager, dataLayout);
8614}
8615
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008616LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
8617 armnn::IWorkloadFactory& workloadFactory,
8618 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8619 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01008620{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008621 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008622 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01008623}
8624
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008625LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
8626 armnn::IWorkloadFactory& workloadFactory,
8627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008628{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008629 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008630}
8631
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008632LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
8633 armnn::IWorkloadFactory& workloadFactory,
8634 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008635{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008636 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8637 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008638}
8639
Teresa Charlin0434df62019-06-06 13:40:35 +01008640LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
8641 armnn::IWorkloadFactory& workloadFactory,
8642 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8643{
8644 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8645 workloadFactory, memoryManager);
8646}
8647LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
8648 armnn::IWorkloadFactory& workloadFactory,
8649 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8650{
8651 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8652}
8653
8654LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
8655 armnn::IWorkloadFactory& workloadFactory,
8656 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8657{
8658 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8659 workloadFactory, memoryManager);
8660}
8661
8662LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
8663 armnn::IWorkloadFactory& workloadFactory,
8664 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8665{
8666 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8667 workloadFactory, memoryManager);
8668}
8669
8670LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
8671 armnn::IWorkloadFactory& workloadFactory,
8672 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8673{
8674 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
8675 workloadFactory, memoryManager);
8676}
8677
8678LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
8679 armnn::IWorkloadFactory& workloadFactory,
8680 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8681{
8682 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
8683 workloadFactory, memoryManager);
8684}
8685
8686LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
8687 armnn::IWorkloadFactory& workloadFactory,
8688 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8689{
8690 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
8691 workloadFactory, memoryManager);
8692}
8693
8694LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
8695 armnn::IWorkloadFactory& workloadFactory,
8696 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8697{
8698 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8699}
8700
8701LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
8702 armnn::IWorkloadFactory& workloadFactory,
8703 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8704{
8705 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8706 workloadFactory, memoryManager);
8707}
8708
8709LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
8710 armnn::IWorkloadFactory& workloadFactory,
8711 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8712{
8713 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
8714 workloadFactory, memoryManager);
8715}
8716
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008717LayerTestResult<float, 4> SimpleL2Pooling2dTest(
8718 armnn::IWorkloadFactory& workloadFactory,
8719 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008720 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008721{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008722 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008723}
8724
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008725LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
8726 armnn::IWorkloadFactory& workloadFactory,
8727 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008728 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008729{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008730 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008731}
8732
Teresa Charlin0434df62019-06-06 13:40:35 +01008733LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
8734 armnn::IWorkloadFactory& workloadFactory,
8735 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8736 const armnn::DataLayout dataLayout)
8737{
8738 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
8739}
8740
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008741LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
8742 armnn::IWorkloadFactory& workloadFactory,
8743 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008744{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008745 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008746}
8747
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008748LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
8749 armnn::IWorkloadFactory& workloadFactory,
8750 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008751{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008752 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008753}
8754
Teresa Charlin0434df62019-06-06 13:40:35 +01008755LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
8756 armnn::IWorkloadFactory& workloadFactory,
8757 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8758{
8759 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8760}
8761
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008762LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
8763 armnn::IWorkloadFactory& workloadFactory,
8764 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008765{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008766 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008767}
8768
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008769LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
8770 armnn::IWorkloadFactory& workloadFactory,
8771 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008772{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008773 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008774}
8775
Teresa Charlin0434df62019-06-06 13:40:35 +01008776LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
8777 armnn::IWorkloadFactory& workloadFactory,
8778 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8779{
8780 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8781}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008782LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
8783 armnn::IWorkloadFactory& workloadFactory,
8784 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008785{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008786 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008787}
8788
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008789LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
8790 armnn::IWorkloadFactory& workloadFactory,
8791 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008792{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008793 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008794}
8795
Teresa Charlin0434df62019-06-06 13:40:35 +01008796LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
8797 armnn::IWorkloadFactory& workloadFactory,
8798 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8799{
8800 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8801}
8802
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008803LayerTestResult<float, 4> L2Pooling2dSize7Test(
8804 armnn::IWorkloadFactory& workloadFactory,
8805 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008806{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008807 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008808}
8809
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008810LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
8811 armnn::IWorkloadFactory& workloadFactory,
8812 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008813{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008814 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008815}
8816
Teresa Charlin0434df62019-06-06 13:40:35 +01008817LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
8818 armnn::IWorkloadFactory& workloadFactory,
8819 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8820{
8821 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8822}
8823
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008824LayerTestResult<float, 4> L2Pooling2dSize9Test(
8825 armnn::IWorkloadFactory& workloadFactory,
8826 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008827{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008828 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008829}
8830
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008831LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
8832 armnn::IWorkloadFactory& workloadFactory,
8833 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008834{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008835 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008836}
8837
Teresa Charlin0434df62019-06-06 13:40:35 +01008838LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
8839 armnn::IWorkloadFactory& workloadFactory,
8840 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8841{
8842 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8843}
8844LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
8845 armnn::IWorkloadFactory& workloadFactory,
8846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8847{
8848 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8849}
8850
8851LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
8852 armnn::IWorkloadFactory& workloadFactory,
8853 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8854{
8855 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
8856}
8857
8858LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
8859 armnn::IWorkloadFactory& workloadFactory,
8860 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8861{
8862 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8863}
8864
8865LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
8866 armnn::IWorkloadFactory& workloadFactory,
8867 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8868{
8869 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8870}
8871
8872LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
8873 armnn::IWorkloadFactory& workloadFactory,
8874 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8875{
8876 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
8877}
8878
8879LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
8880 armnn::IWorkloadFactory& workloadFactory,
8881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8882{
8883 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8884}
8885
// AsymmetricNonSquarePooling2d: per-data-type wrappers around the templated common
// implementation (Float32, QAsymm8, QSymm16).
LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8906
// ComparePooling2d: runs the given pooling algorithm on both workloadFactory and
// refWorkloadFactory and compares the results, per data type.
LayerTestResult<float, 4> ComparePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    // Extra trailing arguments 0.1f / 128 are only passed for the QAsymm8 variant —
    // presumably quantisation scale and offset; confirm against
    // ComparePooling2dTestCommon's signature.
    return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}
8936
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008937LayerTestResult<float, 2> FullyConnectedLargeTest(
8938 armnn::IWorkloadFactory& workloadFactory,
8939 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8940 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00008941{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008942 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00008943}
8944
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008945LayerTestResult<float, 4> SimplePermuteFloat32Test(
8946 armnn::IWorkloadFactory& workloadFactory,
8947 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008948{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008949 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008950};
8951
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008952LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
8953 armnn::IWorkloadFactory& workloadFactory,
8954 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008955{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008956 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008957};
surmeh01bceff2f2018-03-29 16:29:27 +01008958
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008959LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
8960 armnn::IWorkloadFactory& workloadFactory,
8961 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008962{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008963 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008964};
8965
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008966LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
8967 armnn::IWorkloadFactory& workloadFactory,
8968 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008969{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008970 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008971};
8972
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008973LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
8974 armnn::IWorkloadFactory& workloadFactory,
8975 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008976{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008977 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01008978};
8979
// Regression test that chains two workloads by tensor handle: a MaxPool2d whose
// output handle is fed directly as the first input of an Addition workload.
// Returns the Addition result together with the expected values.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    // Scratch buffer sized like the pooling output; see NOTE(review) below on how
    // it is (not really) used.
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                    {12, 16,
                                                     24, 28,
                                                    });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    // Handles must be allocated before any data is copied in or out.
    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): this reads poolingOutputHandle BEFORE the pooling workload has
    // executed, then writes the same bytes straight back — a no-op round-trip as far
    // as this test's result is concerned, since the Addition workload reads
    // poolingOutputHandle directly after workload->Execute() below. Confirm intent
    // before simplifying.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // PostAllocationConfigure() is called after allocation and before Execute()
    // for each workload, in execution order: pooling first, then addition.
    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009084
// SpaceToBatchNd wrappers: Simple / MultiChannels / MultiBlock / Padding variants,
// first in the default layout for Float32 and QAsymm8, then the NHWC variants for
// the same two types. Each forwards to the templated common implementation.
LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// QAsymm8 variants.
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// NHWC data-layout variants, Float32.
LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// NHWC data-layout variants, QAsymm8.
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009196
// SpaceToBatchNd QSymm16 wrappers.
// NOTE(review): these are named "*Uint16Test" but return int16_t and instantiate
// QuantisedSymm16 — inconsistent with the "*Int16Test" convention used by the
// StridedSlice wrappers below. Renaming would change the declared interface, so it
// is only flagged here.
LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

// NHWC data-layout variants, QSymm16.
LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
9252
Keith Davisa57eccb2019-06-14 17:33:22 +01009253
9254LayerTestResult<uint8_t, 4> SpaceToDepthNHWCAsymmQ8Test(
9255 armnn::IWorkloadFactory& workloadFactory,
9256 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9257{
9258 return SpaceToDepthSimpleTest<armnn::DataType::QuantisedAsymm8>(
9259 workloadFactory,
9260 memoryManager);
9261}
9262
9263LayerTestResult<uint8_t, 4> SpaceToDepthNCHWAsymmQ8Test(
9264 armnn::IWorkloadFactory& workloadFactory,
9265 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9266{
9267 return SpaceToDepthSimpleTest<armnn::DataType::QuantisedAsymm8>(
9268 workloadFactory,
9269 memoryManager,
9270 armnn::DataLayout::NCHW);
9271}
9272
9273LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test(
9274 armnn::IWorkloadFactory& workloadFactory,
9275 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9276{
9277 return SpaceToDepthFloatTest<armnn::DataType::Float32>(
9278 workloadFactory,
9279 memoryManager);
9280}
9281
9282LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test(
9283 armnn::IWorkloadFactory& workloadFactory,
9284 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9285{
9286 return SpaceToDepthFloatTest<armnn::DataType::Float32>(
9287 workloadFactory,
9288 memoryManager,
9289 armnn::DataLayout::NCHW);
9290}
9291
// NOTE(review): this anonymous namespace is empty — apparently a leftover from
// removed file-local helpers; safe to delete in a follow-up clean-up.
namespace {

} // anonymous namespace
9295
// StridedSlice wrappers, Float32 first and then QAsymm8: 4D / 4DReverse /
// SimpleStride / SimpleRangeMask / ShrinkAxisMask / 3D / 3DReverse / 2D /
// 2DReverse. The result rank follows the variant (e.g. ShrinkAxisMask yields a
// rank-2 tensor). Each forwards to the templated common implementation.
LayerTestResult<float, 4> StridedSlice4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// QAsymm8 variants.
LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009421
// StridedSlice QSymm16 wrappers — same variant set as the Float32/QAsymm8 groups
// above, instantiated for QuantisedSymm16.
LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
9484
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009485LayerTestResult<float, 4> Debug4DFloat32Test(
9486 armnn::IWorkloadFactory& workloadFactory,
9487 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9488{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009489 return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009490}
9491
9492LayerTestResult<float, 3> Debug3DFloat32Test(
9493 armnn::IWorkloadFactory& workloadFactory,
9494 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9495{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009496 return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009497}
9498
9499LayerTestResult<float, 2> Debug2DFloat32Test(
9500 armnn::IWorkloadFactory& workloadFactory,
9501 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9502{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009503 return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009504}
9505
9506LayerTestResult<float, 1> Debug1DFloat32Test(
9507 armnn::IWorkloadFactory& workloadFactory,
9508 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9509{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009510 return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009511}
9512
9513LayerTestResult<uint8_t, 4> Debug4DUint8Test(
9514 armnn::IWorkloadFactory& workloadFactory,
9515 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9516{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009517 return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009518}
9519
9520LayerTestResult<uint8_t, 3> Debug3DUint8Test(
9521 armnn::IWorkloadFactory& workloadFactory,
9522 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9523{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009524 return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009525}
9526
9527LayerTestResult<uint8_t, 2> Debug2DUint8Test(
9528 armnn::IWorkloadFactory& workloadFactory,
9529 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9530{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009531 return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009532}
9533
9534LayerTestResult<uint8_t, 1> Debug1DUint8Test(
9535 armnn::IWorkloadFactory& workloadFactory,
9536 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9537{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009538 return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009539}
Matteo Martincigh49124022019-01-11 13:25:59 +00009540
narpra014951d842019-01-18 16:53:53 +00009541LayerTestResult<float, 1> Gather1DParamsFloatTest(
9542 armnn::IWorkloadFactory& workloadFactory,
9543 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9544{
9545 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9546}
9547
9548LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
9549 armnn::IWorkloadFactory& workloadFactory,
9550 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9551{
9552 return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9553}
9554
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009555LayerTestResult<int16_t, 1> Gather1DParamsInt16Test(
9556 armnn::IWorkloadFactory& workloadFactory,
9557 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9558{
9559 return Gather1DParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9560}
9561
narpra014951d842019-01-18 16:53:53 +00009562LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
9563 armnn::IWorkloadFactory& workloadFactory,
9564 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9565{
9566 return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9567}
9568
9569LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
9570 armnn::IWorkloadFactory& workloadFactory,
9571 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9572{
9573 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9574}
9575
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009576LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
9577 armnn::IWorkloadFactory& workloadFactory,
9578 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9579{
9580 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9581}
9582
narpra014951d842019-01-18 16:53:53 +00009583LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
9584 armnn::IWorkloadFactory& workloadFactory,
9585 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9586{
9587 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9588}
9589
9590LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
9591 armnn::IWorkloadFactory& workloadFactory,
9592 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9593{
9594 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
9595 workloadFactory, memoryManager);
Matteo Martincigh3d6898c2019-01-15 16:11:44 +00009596}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009597
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009598LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
9599 armnn::IWorkloadFactory& workloadFactory,
9600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9601{
9602 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedSymm16>(
9603 workloadFactory, memoryManager);
9604}
9605
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009606LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009607 armnn::IWorkloadFactory& workloadFactory,
9608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9609{
9610 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9611}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009612
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009613LayerTestResult<float, 4> DequantizeOffsetUint8Test(
9614 armnn::IWorkloadFactory& workloadFactory,
9615 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9616{
9617 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9618}
9619
9620LayerTestResult<float, 4> DequantizeSimpleInt16Test(
9621 armnn::IWorkloadFactory& workloadFactory,
9622 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9623{
9624 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9625}
9626
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009627LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
9628 armnn::IWorkloadFactory& workloadFactory,
9629 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9630{
9631 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9632}
9633
9634LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
9635 armnn::IWorkloadFactory& workloadFactory,
9636 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9637{
9638 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9639}
9640
9641LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
9642 armnn::IWorkloadFactory& workloadFactory,
9643 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9644{
9645 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9646}
Aron Virginas-Tar735a4502019-06-26 15:02:47 +01009647
9648//
9649// TransposeConvolution2d
9650//
9651
9652// Simple biased
9653LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNchwTest(
9654 armnn::IWorkloadFactory& workloadFactory,
9655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9656{
9657 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9658 workloadFactory,
9659 memoryManager,
9660 true,
9661 armnn::DataLayout::NCHW);
9662}
9663
9664LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNhwcTest(
9665 armnn::IWorkloadFactory& workloadFactory,
9666 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9667{
9668 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9669 workloadFactory,
9670 memoryManager,
9671 true,
9672 armnn::DataLayout::NHWC);
9673}
9674
9675LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NchwTest(
9676 armnn::IWorkloadFactory& workloadFactory,
9677 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9678{
9679 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9680 workloadFactory,
9681 memoryManager,
9682 true,
9683 armnn::DataLayout::NCHW);
9684}
9685
9686LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NhwcTest(
9687 armnn::IWorkloadFactory& workloadFactory,
9688 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9689{
9690 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9691 workloadFactory,
9692 memoryManager,
9693 true,
9694 armnn::DataLayout::NHWC);
9695}
9696
9697LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NchwTest(
9698 armnn::IWorkloadFactory& workloadFactory,
9699 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9700{
9701 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9702 workloadFactory,
9703 memoryManager,
9704 true,
9705 armnn::DataLayout::NCHW);
9706}
9707
9708LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NhwcTest(
9709 armnn::IWorkloadFactory& workloadFactory,
9710 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9711{
9712 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9713 workloadFactory,
9714 memoryManager,
9715 true,
9716 armnn::DataLayout::NHWC);
9717}
9718
9719// Simple unbiased
9720LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNchwTest(
9721 armnn::IWorkloadFactory& workloadFactory,
9722 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9723{
9724 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9725 workloadFactory,
9726 memoryManager,
9727 false,
9728 armnn::DataLayout::NCHW);
9729}
9730
9731LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNhwcTest(
9732 armnn::IWorkloadFactory& workloadFactory,
9733 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9734{
9735 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9736 workloadFactory,
9737 memoryManager,
9738 false,
9739 armnn::DataLayout::NHWC);
9740}
9741
9742LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NchwTest(
9743 armnn::IWorkloadFactory& workloadFactory,
9744 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9745{
9746 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9747 workloadFactory,
9748 memoryManager,
9749 false,
9750 armnn::DataLayout::NCHW);
9751}
9752
9753LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NhwcTest(
9754 armnn::IWorkloadFactory& workloadFactory,
9755 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9756{
9757 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9758 workloadFactory,
9759 memoryManager,
9760 false,
9761 armnn::DataLayout::NHWC);
9762}
9763
9764LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NchwTest(
9765 armnn::IWorkloadFactory& workloadFactory,
9766 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9767{
9768 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9769 workloadFactory,
9770 memoryManager,
9771 false,
9772 armnn::DataLayout::NCHW);
9773}
9774
9775LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NhwcTest(
9776 armnn::IWorkloadFactory& workloadFactory,
9777 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9778{
9779 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9780 workloadFactory,
9781 memoryManager,
9782 false,
9783 armnn::DataLayout::NHWC);
9784}
9785
9786// Padded biased
9787LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNchwTest(
9788 armnn::IWorkloadFactory& workloadFactory,
9789 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9790{
9791 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9792 workloadFactory,
9793 memoryManager,
9794 true,
9795 armnn::DataLayout::NCHW);
9796}
9797
9798LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNhwcTest(
9799 armnn::IWorkloadFactory& workloadFactory,
9800 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9801{
9802 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9803 workloadFactory,
9804 memoryManager,
9805 true,
9806 armnn::DataLayout::NHWC);
9807}
9808
9809LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NchwTest(
9810 armnn::IWorkloadFactory& workloadFactory,
9811 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9812{
9813 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9814 workloadFactory,
9815 memoryManager,
9816 true,
9817 armnn::DataLayout::NCHW);
9818}
9819
9820LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NhwcTest(
9821 armnn::IWorkloadFactory& workloadFactory,
9822 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9823{
9824 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9825 workloadFactory,
9826 memoryManager,
9827 true,
9828 armnn::DataLayout::NHWC);
9829}
9830
9831LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NchwTest(
9832 armnn::IWorkloadFactory& workloadFactory,
9833 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9834{
9835 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9836 workloadFactory,
9837 memoryManager,
9838 true,
9839 armnn::DataLayout::NCHW);
9840}
9841
9842LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NhwcTest(
9843 armnn::IWorkloadFactory& workloadFactory,
9844 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9845{
9846 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9847 workloadFactory,
9848 memoryManager,
9849 true,
9850 armnn::DataLayout::NHWC);
9851}
9852
9853// Padded unbiased
9854LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNchwTest(
9855 armnn::IWorkloadFactory& workloadFactory,
9856 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9857{
9858 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9859 workloadFactory,
9860 memoryManager,
9861 false,
9862 armnn::DataLayout::NCHW);
9863}
9864
9865LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNhwcTest(
9866 armnn::IWorkloadFactory& workloadFactory,
9867 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9868{
9869 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9870 workloadFactory,
9871 memoryManager,
9872 false,
9873 armnn::DataLayout::NHWC);
9874}
9875
9876LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NchwTest(
9877 armnn::IWorkloadFactory& workloadFactory,
9878 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9879{
9880 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9881 workloadFactory,
9882 memoryManager,
9883 false,
9884 armnn::DataLayout::NCHW);
9885}
9886
9887LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NhwcTest(
9888 armnn::IWorkloadFactory& workloadFactory,
9889 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9890{
9891 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9892 workloadFactory,
9893 memoryManager,
9894 false,
9895 armnn::DataLayout::NHWC);
9896}
9897
9898LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NchwTest(
9899 armnn::IWorkloadFactory& workloadFactory,
9900 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9901{
9902 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9903 workloadFactory,
9904 memoryManager,
9905 false,
9906 armnn::DataLayout::NCHW);
9907}
9908
9909LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NhwcTest(
9910 armnn::IWorkloadFactory& workloadFactory,
9911 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9912{
9913 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9914 workloadFactory,
9915 memoryManager,
9916 false,
9917 armnn::DataLayout::NHWC);
9918}
9919
9920// Strided biased
9921LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNchwTest(
9922 armnn::IWorkloadFactory& workloadFactory,
9923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9924{
9925 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9926 workloadFactory,
9927 memoryManager,
9928 true,
9929 armnn::DataLayout::NCHW);
9930}
9931
9932LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNhwcTest(
9933 armnn::IWorkloadFactory& workloadFactory,
9934 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9935{
9936 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9937 workloadFactory,
9938 memoryManager,
9939 true,
9940 armnn::DataLayout::NHWC);
9941}
9942
9943LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NchwTest(
9944 armnn::IWorkloadFactory& workloadFactory,
9945 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9946{
9947 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9948 workloadFactory,
9949 memoryManager,
9950 true,
9951 armnn::DataLayout::NCHW);
9952}
9953
9954LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NhwcTest(
9955 armnn::IWorkloadFactory& workloadFactory,
9956 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9957{
9958 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9959 workloadFactory,
9960 memoryManager,
9961 true,
9962 armnn::DataLayout::NHWC);
9963}
9964
9965LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NchwTest(
9966 armnn::IWorkloadFactory& workloadFactory,
9967 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9968{
9969 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9970 workloadFactory,
9971 memoryManager,
9972 true,
9973 armnn::DataLayout::NCHW);
9974}
9975
9976LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NhwcTest(
9977 armnn::IWorkloadFactory& workloadFactory,
9978 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9979{
9980 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9981 workloadFactory,
9982 memoryManager,
9983 true,
9984 armnn::DataLayout::NHWC);
9985}
9986
9987// Strided unbiased
9988LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNchwTest(
9989 armnn::IWorkloadFactory& workloadFactory,
9990 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9991{
9992 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9993 workloadFactory,
9994 memoryManager,
9995 false,
9996 armnn::DataLayout::NCHW);
9997}
9998
9999LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNhwcTest(
10000 armnn::IWorkloadFactory& workloadFactory,
10001 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10002{
10003 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10004 workloadFactory,
10005 memoryManager,
10006 false,
10007 armnn::DataLayout::NHWC);
10008}
10009
10010LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NchwTest(
10011 armnn::IWorkloadFactory& workloadFactory,
10012 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10013{
10014 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10015 workloadFactory,
10016 memoryManager,
10017 false,
10018 armnn::DataLayout::NCHW);
10019}
10020
10021LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NhwcTest(
10022 armnn::IWorkloadFactory& workloadFactory,
10023 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10024{
10025 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10026 workloadFactory,
10027 memoryManager,
10028 false,
10029 armnn::DataLayout::NHWC);
10030}
10031
10032LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NchwTest(
10033 armnn::IWorkloadFactory& workloadFactory,
10034 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10035{
10036 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10037 workloadFactory,
10038 memoryManager,
10039 false,
10040 armnn::DataLayout::NCHW);
10041}
10042
10043LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NhwcTest(
10044 armnn::IWorkloadFactory& workloadFactory,
10045 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10046{
10047 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10048 workloadFactory,
10049 memoryManager,
10050 false,
10051 armnn::DataLayout::NHWC);
10052}