blob: a088aaa9472eff2ebddcdcdd850c138224a32fb9 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008#include "TypeUtils.hpp"
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
33#include "ReshapeTestImpl.hpp"
34#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000035#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000036#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
41#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010042#include "LstmTestImpl.hpp"
43#include "ConvertFp16ToFp32TestImpl.hpp"
44#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000045#include "DebugTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000046
telsoa01c577f2c2018-08-31 09:22:23 +010047// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Stored channel-major: 8 rows of 16 values per channel, 3 channels (NCHW body
// for a {1, 3, 8, 16} tensor).
static std::vector<float> ConvInput3x8x16({
    // Channel 0: mostly 0.5 with one row of zeros.
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    // Channel 1: zeros with a single vertical stripe of ones in column 2.
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    // Channel 2: all -1.
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
74
telsoa01c577f2c2018-08-31 09:22:23 +010075// 2-channel bias used by a number of Conv2d tests.
telsoa014fcda012018-03-09 14:13:49 +000076static std::vector<float> Bias2({0, 2});
77
telsoa01c577f2c2018-08-31 09:22:23 +010078// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000079template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
telsoa014fcda012018-03-09 14:13:49 +000080boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
81{
82 if(biasEnabled)
83 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000084 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +000085 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
86 return bias;
87 }
88 else
89 {
90 return boost::multi_array<T, 1>();
91 }
92}
93
// Runs a Conv2d over the shared 3-channel 16x8 input with a 2-element batch of
// 3-channel 3x5 kernels and checks the result against precomputed expected
// values. qScale/qOffset quantize input, kernel, bias and expected output.
// The layout argument is forwarded to the implementation helper.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Batch 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Batch 0, channel 1.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Batch 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Batch 1, channel 0.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Batch 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Batch 1, channel 2.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
177
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000178template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
179 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000180LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
181 armnn::IWorkloadFactory& workloadFactory,
182 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
183 float qScale,
184 int32_t qOffset,
185 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000186 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000187{
telsoa01c577f2c2018-08-31 09:22:23 +0100188 // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
telsoa014fcda012018-03-09 14:13:49 +0000189
telsoa01c577f2c2018-08-31 09:22:23 +0100190 // Use common single-batch 3-channel 16x8 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000191 armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000192 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
193
telsoa01c577f2c2018-08-31 09:22:23 +0100194 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000195 armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000196 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
197 QuantizedVector<T>(qScale, qOffset, {
198 1, 1, 1,
199 1, -1, 1,
200 1, 1, 1,
201
202 0, 0, 0,
203 0, 0, 0,
204 0, 0, 0,
205
206 2, 2, 2,
207 2, 2, 2,
208 2, 2, 2,
209
210
211 0, 0, 0,
212 0, 0, 0,
213 0, 0, 0,
214
215 1, 1, 1,
216 1, 1, 1,
217 1, 1, 1,
218
219 0, 0, 0,
220 0, 0, 0,
221 0, 0, 0
222 })));
223
telsoa01c577f2c2018-08-31 09:22:23 +0100224 // Expected output is 1 batch of a 2-channel 14x6 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000225 armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000226 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
227 QuantizedVector<T>(qScale, qOffset, {
228 -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
229 -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
230 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
231 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
232 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
233 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
234
235 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
236 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
237 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
238 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
239 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
240 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
241 })));
242
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000243 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
244 workloadFactory,
245 memoryManager,
246 input,
247 kernel,
248 GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
249 expectedOutput,
250 qScale,
251 qOffset,
252 layout);
telsoa014fcda012018-03-09 14:13:49 +0000253}
254
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000255template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000256LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
257 armnn::IWorkloadFactory& workloadFactory,
258 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
259 float qScale,
260 int32_t qOffset,
261 bool biasEnabled,
262 armnn::DataLayout dataLayout)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100263{
264 // Use common single-batch 5x5 image.
265
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000266 armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100267 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
268 {
269 1, 5, 2, 3,
270 8, 7, 3, 6,
271 3, 3, 9, 1
272 });
273
274
275 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000276 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100277 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
278 4, 5, 6,
279 0, 0, 0,
280 3, 2, 1
281 });
282
283 // Expected output is 1 batch of a 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000284 armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100285
286 const std::vector<float> outputData =
287 {
288 23, 41, 33, 21,
289 44, 65, 76, 52,
290 82, 85, 79, 42
291 };
292
293 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
294
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000295 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
296 workloadFactory,
297 memoryManager,
298 input,
299 kernel,
300 boost::multi_array<T, 1>(),
301 expectedOutput,
302 dataLayout,
303 qScale,
304 qOffset);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100305}
306
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000307template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly7332ed82018-12-20 17:03:06 +0000308LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
309 armnn::IWorkloadFactory& workloadFactory,
310 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
311 float qScale,
312 int32_t qOffset,
313 bool biasEnabled,
314 const armnn::DataLayout& dataLayout)
315{
316 // Input is a single-batch, 1 channel, 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000317 armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000318 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
319 {
320 1, 5, 2, 3, 5,
321 8, 7, 3, 6, 3,
322 3, 3, 9, 1, 9,
323 4, 1, 8, 1, 3,
324 6, 8, 1, 9, 2
325 });
326
327 // Use a 3x3 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000328 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000329 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
330 {
331 4, 5, 6,
332 0, 0, 0,
333 3, 2, 1
334 });
335
336 // Expected output is a single-batch, 1 channel, 3x3 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000337 armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000338
339 const std::vector<T> outputData =
340 {
341 23, 33, 24,
342 91, 99, 48,
343 26, 50, 19
344 };
345
346 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
347
348 uint32_t padLeft = 1;
349 uint32_t padTop = 1;
350 uint32_t padRight = 1;
351 uint32_t padBottom = 1;
352 uint32_t strideX = 2;
353 uint32_t strideY = 2;
354
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000355 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
356 workloadFactory,
357 memoryManager,
358 input,
359 kernel,
360 boost::multi_array<T, 1>(),
361 expectedOutput,
362 dataLayout,
363 qScale,
364 qOffset,
365 padLeft,
366 padTop,
367 padRight,
368 padBottom,
369 strideX,
370 strideY);
Mike Kelly7332ed82018-12-20 17:03:06 +0000371}
372
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000373LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
374 armnn::IWorkloadFactory& workloadFactory,
375 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
376 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000377 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000378{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000379 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
380 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000381}
382
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000383LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
384 armnn::IWorkloadFactory& workloadFactory,
385 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
386 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000387 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000388{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000389 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
390 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000391}
392
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000393LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
394 armnn::IWorkloadFactory& workloadFactory,
395 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
396 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000397 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000398{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000399 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
400 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000401}
402
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000403LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
404 armnn::IWorkloadFactory& workloadFactory,
405 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
406 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100407{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000408 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
409 workloadFactory,
410 memoryManager,
411 0.f,
412 0,
413 biasEnabled,
414 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100415}
416
Mike Kelly7332ed82018-12-20 17:03:06 +0000417LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
418 armnn::IWorkloadFactory& workloadFactory,
419 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
420 bool biasEnabled,
421 const armnn::DataLayout layout)
422{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000423 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
424 workloadFactory,
425 memoryManager,
426 0.f,
427 0,
428 biasEnabled,
429 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000430}
431
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000432LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
433 armnn::IWorkloadFactory& workloadFactory,
434 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
435 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000436 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000437{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000438 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
439 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000440}
441
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000442template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
443 typename T = armnn::ResolveType<ArmnnType>>
telsoa014fcda012018-03-09 14:13:49 +0000444LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
445 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000447 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000448 float qScale,
449 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000450{
telsoa01c577f2c2018-08-31 09:22:23 +0100451 // Use a single-batch 1-channel 3x3 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000452 armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000453 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
454 QuantizedVector<T>(qScale, qOffset, {
455 11,21,31,
456 12,22,32,
457 13,23,33
458 })));
459
telsoa01c577f2c2018-08-31 09:22:23 +0100460 // Use 1 batch of a 1-channel 2x2 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000461 armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000462 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
463 QuantizedVector<T>(qScale, qOffset, {
464 -11,-21,
465 -12,-22,
466 })));
467
telsoa01c577f2c2018-08-31 09:22:23 +0100468// Expected output is 1 batch of a 1-channel 6x8 image.
telsoa014fcda012018-03-09 14:13:49 +0000469// Manually calculated like this:
470//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
471//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
472//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
473//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
474//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
475//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
476//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000477 armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000478 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
479 QuantizedVector<T>(qScale, qOffset, {
480 0, 0, 0, 0, 0, 0,
481 -242, -594, -934, -372, 0, 0,
482 -495, -1190, -1850, -725, 0, 0,
483 -538, -1256, -1916, -748, 0, 0,
484 -273, -626, -946, -363, 0, 0,
485 0, 0, 0, 0, 0, 0,
486 0, 0, 0, 0, 0, 0,
487 0, 0, 0, 0, 0, 0
488 })));
489
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000490 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
491 workloadFactory,
492 memoryManager,
493 input,
494 kernel,
495 GetBias2<ArmnnBType>(false, qScale, qOffset),
496 expectedOutput,
497 qScale,
498 qOffset,
499 layout,
500 1, // Padding left.
501 2, // Padding top.
502 3, // Padding right.
503 4); // Padding bottom.
telsoa014fcda012018-03-09 14:13:49 +0000504}
505
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000506template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
507 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000508LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
509 armnn::IWorkloadFactory& workloadFactory,
510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000511 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000512 float qScale,
513 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000514{
telsoa01c577f2c2018-08-31 09:22:23 +0100515 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000516 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000517 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
518 QuantizedVector<T>(qScale, qOffset, {
519 11,21,31,41,51,
520 12,22,32,42,52,
521 13,23,33,43,53,
522 14,24,34,44,54,
523 15,25,35,45,55,
524 })));
525
telsoa01c577f2c2018-08-31 09:22:23 +0100526 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000527 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000528 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
529 QuantizedVector<T>(qScale, qOffset, {
530 -11,-21,-31,-41,
531 -12,-22,-32,-42,
532 -13,-23,-33,-43,
533 -14,-24,-34,-44,
534 })));
535
telsoa01c577f2c2018-08-31 09:22:23 +0100536 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000537 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000538 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
539 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
540 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000541 -7140, -10580, -13940, -9300, -5230,
542 -9590, -14120, -18520, -12290, -6860,
543 -9980, -14560, -18960, -12560, -7000,
544 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100545 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000546 })));
547
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000548 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
549 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000550 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000551 input,
552 kernel,
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000553 GetBias2<ArmnnBType>(false, qScale, qOffset),
telsoa014fcda012018-03-09 14:13:49 +0000554 expectedOutput,
555 qScale,
556 qOffset,
narpra015f703182018-10-26 16:24:58 +0100557 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100558 1, // Padding left.
559 1, // Padding top.
560 2, // Padding right.
561 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100562}
563
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000564template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
565 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000566LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
567 armnn::IWorkloadFactory& workloadFactory,
568 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
569 float qScale,
570 int32_t qOffset,
571 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000572 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100573{
telsoa01c577f2c2018-08-31 09:22:23 +0100574 // Use a single-batch 2-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000575 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +0100576 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
577 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
578 0, 1, 2, 3, 4,
579 5, 6, 7, 8, 9,
580 10, 11, 12, 13, 14,
581 15, 16, 17, 18, 19,
582 20, 21, 22, 23, 24,
583
584 25, 26, 27, 28, 29,
585 30, 31, 32, 33, 34,
586 35, 36, 37, 38, 39,
587 40, 41, 42, 43, 44,
588 45, 46, 47, 48, 49
589 })));
590
telsoa01c577f2c2018-08-31 09:22:23 +0100591 // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000592 armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +0100593 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
594 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
595 32, 31, 30, 29,
596 28, 27, 26, 25,
597 24, 23, 22, 21,
598 20, 19, 18, 17,
599
600 16, 15, 14, 13,
601 12, 11, 10, 9,
602 8, 7, 6, 5,
603 4, 3, 2, 1
604 })));
605
telsoa01c577f2c2018-08-31 09:22:23 +0100606 // Expected output is 1 batch of a 2-channel 5x5 image.
607 // Calculated using the python tensorflow library with strideX=1, strideY=1.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000608 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +0100609 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
610 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
611 1062, 1580, 1850, 1530, 1117,
612 2140, 3108, 3500, 2842, 2042,
613 3580, 5068, 5460, 4342, 3062,
614 3618, 5072, 5390, 4248, 2971,
615 3074, 4282, 4510, 3533, 2457,
616 1550, 2284, 2362, 1955, 1428,
617 2910, 4206, 4342, 3528, 2536,
618 3390, 4886, 5022, 4068, 2916,
619 3566, 5056, 5182, 4133, 2922,
620 3100, 4352, 4452, 3517, 2465
621 })));
622
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000623 return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
624 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000625 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +0100626 input,
627 kernel,
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000628 GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
surmeh013537c2c2018-05-18 16:31:43 +0100629 expectedOutput,
630 qScale,
631 qOffset,
jimfly01382a91d2018-10-26 15:55:50 +0100632 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100633 1, // Padding left.
634 1, // Padding top.
635 2, // Padding right.
636 2, // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100637 1, // strideX
638 1); // strideY
telsoa014fcda012018-03-09 14:13:49 +0000639}
640
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000641template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
642 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000643LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
644 armnn::IWorkloadFactory& workloadFactory,
645 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
646 float qScale,
647 int32_t qOffset,
648 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100649{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000650 armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100651 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
652 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
653 0, 25,
654 1, 26,
655 2, 27,
656 3, 28,
657 4, 29,
658
659 5, 30,
660 6, 31,
661 7, 32,
662 8, 33,
663 9, 34,
664
665 10, 35,
666 11, 36,
667 12, 37,
668 13, 38,
669 14, 39,
670
671 15, 40,
672 16, 41,
673 17, 42,
674 18, 43,
675 19, 44,
676
677 20, 45,
678 21, 46,
679 22, 47,
680 23, 48,
681 24, 49
682 })));
683
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000684 armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100685 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
686 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
Matteo Martincigh747ef822018-12-18 09:26:39 +0000687 32, 31, 30, 29,
688 28, 27, 26, 25,
689 24, 23, 22, 21,
690 20, 19, 18, 17,
Nikhil Rajcec6b652018-10-12 13:51:57 +0100691
Matteo Martincigh747ef822018-12-18 09:26:39 +0000692 16, 15, 14, 13,
693 12, 11, 10, 9,
694 8, 7, 6, 5,
695 4, 3, 2, 1
Nikhil Rajcec6b652018-10-12 13:51:57 +0100696 })));
697
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000698 armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100699 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
700 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
701 1062, 1550,
702 1580, 2284,
703 1850, 2362,
704 1530, 1955,
705 1117, 1428,
706
707 2140, 2910,
708 3108, 4206,
709 3500, 4342,
710 2842, 3528,
711 2042, 2536,
712
713 3580, 3390,
714 5068, 4886,
715 5460, 5022,
716 4342, 4068,
717 3062, 2916,
718
719 3618, 3566,
720 5072, 5056,
721 5390, 5182,
722 4248, 4133,
723 2971, 2922,
724
725 3074, 3100,
726 4282, 4352,
727 4510, 4452,
728 3533, 3517,
729 2457, 2465
730 })));
731
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000732 return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
733 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000734 memoryManager,
Nikhil Rajcec6b652018-10-12 13:51:57 +0100735 input,
736 kernel,
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000737 GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
Nikhil Rajcec6b652018-10-12 13:51:57 +0100738 expectedOutput,
739 qScale,
740 qOffset,
741 1, // Padding left.
742 1, // Padding top.
743 2, // Padding right.
744 2, // Padding bottom.
745 1, // strideX
746 1); // strideY
747}
748
telsoa014fcda012018-03-09 14:13:49 +0000749LayerTestResult<float, 4>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000750Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
751 armnn::IWorkloadFactory& workloadFactory,
752 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000753 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000754{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000755 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
756 <armnn::DataType::Float32, armnn::DataType::Float32>(
757 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000758}
759
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000760LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
761 armnn::IWorkloadFactory& workloadFactory,
762 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000763 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000764{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000765 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000766 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000767}
768
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000769LayerTestResult<float, 4> DepthwiseConvolution2dTest(
770 armnn::IWorkloadFactory& workloadFactory,
771 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
772 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000773 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000774{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000775 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000776 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000777}
778
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000779LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
780 armnn::IWorkloadFactory& workloadFactory,
781 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
782 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100783{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000784 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
785 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100786}
787
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000788LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
789 armnn::IWorkloadFactory& workloadFactory,
790 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
791 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000792 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000793{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000794 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000795 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000796}
797
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000798LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
799 armnn::IWorkloadFactory& workloadFactory,
800 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
801 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000802 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100803{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000804 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000805 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +0100806}
807
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000808LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
809 armnn::IWorkloadFactory& workloadFactory,
810 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
811 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000812 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000813{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000814 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000815 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000816}
817
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000818LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
819 armnn::IWorkloadFactory& workloadFactory,
820 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
821 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000822 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000823{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000824 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000825 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000826}
827
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000828LayerTestResult<float, 4> Convolution1dTest(
829 armnn::IWorkloadFactory& workloadFactory,
830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
831 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000832{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000833 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
834 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000835}
836
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000837LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
838 armnn::IWorkloadFactory& workloadFactory,
839 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
840 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000841{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000842 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
843 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000844}
845
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000846LayerTestResult<float,4> CompareConvolution2dTest(
847 armnn::IWorkloadFactory& workloadFactory,
848 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
849 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +0000850{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000851 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
852 workloadFactory, memoryManager, refWorkloadFactory);
telsoa014fcda012018-03-09 14:13:49 +0000853}
854
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000855LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000856 armnn::IWorkloadFactory& workloadFactory,
857 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
858 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +0000859 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000860{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000861 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
862 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +0000863}
864
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000865LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
866 armnn::IWorkloadFactory& workloadFactory,
867 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
868 armnn::IWorkloadFactory& refWorkloadFactory,
869 const armnn::DataLayout layout)
870{
871 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
872 workloadFactory, memoryManager, refWorkloadFactory, layout);
873}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000874
875LayerTestResult<float,4> SimpleNormalizationAcrossTest(
876 armnn::IWorkloadFactory& workloadFactory,
877 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000878{
879 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
880 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000881 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000882}
883
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000884LayerTestResult<float,4> SimpleNormalizationWithinTest(
885 armnn::IWorkloadFactory& workloadFactory,
886 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000887{
888 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
889 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000890 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000891}
892
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000893LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
894 armnn::IWorkloadFactory& workloadFactory,
895 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +0100896{
897 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
898 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000899 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +0100900}
901
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000902LayerTestResult<float,2> SimpleSoftmaxTest(
903 armnn::IWorkloadFactory& workloadFactory,
904 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
905 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000906{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000907 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000908}
909
Narumol Prangnawarat65d30962019-03-14 11:55:03 +0000910LayerTestResult<float,3> Simple3dSoftmaxTest(
911 armnn::IWorkloadFactory& workloadFactory,
912 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
913 float beta)
914{
915 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
916}
917
918LayerTestResult<float,4> Simple4dSoftmaxTest(
919 armnn::IWorkloadFactory& workloadFactory,
920 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
921 float beta)
922{
923 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
924}
925
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000926LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
927 armnn::IWorkloadFactory& workloadFactory,
928 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
929 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000930{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000931 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000932}
933
Narumol Prangnawarat65d30962019-03-14 11:55:03 +0000934LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
935 armnn::IWorkloadFactory& workloadFactory,
936 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
937 float beta)
938{
939 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
940}
941
942LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
943 armnn::IWorkloadFactory& workloadFactory,
944 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
945 float beta)
946{
947 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
948}
949
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000950LayerTestResult<float,4> CompareNormalizationTest(
951 armnn::IWorkloadFactory& workloadFactory,
952 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
953 armnn::IWorkloadFactory& refWorkloadFactory,
954 armnn::NormalizationAlgorithmChannel normChannel,
955 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +0000956{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000957 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000958}
959
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000960LayerTestResult<float,2> CompareSoftmaxTest(
961 armnn::IWorkloadFactory& workloadFactory,
962 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000963 armnn::IWorkloadFactory& refWorkloadFactory,
964 float beta)
965{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000966 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
967 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000968}
969
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000970LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
971 armnn::IWorkloadFactory& workloadFactory,
972 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000973 armnn::IWorkloadFactory& refWorkloadFactory,
974 float beta)
975{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000976 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
977 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000978}
979
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000980std::vector<LayerTestResult<float,3>> SplitterTest(
981 armnn::IWorkloadFactory& workloadFactory,
982 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000983{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000984 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +0000985}
986
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000987std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
988 armnn::IWorkloadFactory& workloadFactory,
989 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000990{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000991 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000992}
993
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000994LayerTestResult<float, 3> CopyViaSplitterTest(
995 armnn::IWorkloadFactory& workloadFactory,
996 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000997{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000998 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000999}
1000
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001001LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1002 armnn::IWorkloadFactory& workloadFactory,
1003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001004{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001005 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001006}
1007
telsoa01c577f2c2018-08-31 09:22:23 +01001008LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001009 armnn::IWorkloadFactory& workloadFactory,
1010 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001011{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001012 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001013 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1014 { 2., 3., 3., 4. }));
1015
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001016 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001017 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1018 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1019 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001020 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
1021 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001022}
1023
// Float32 LSTM with peephole connections and a projection layer, CIFG disabled.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: batch of 2, 5 features each.
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    // Golden output: batch of 2, 16 (projected) output units each.
    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
         0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, memoryManager, input, expectedOutput);
}
1044
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001045LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
1046 armnn::IWorkloadFactory& workloadFactory,
1047 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001048{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001049 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001050 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1051 {2., 3., 3., 4.}));
1052
1053
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001054 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001055 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1056 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1057 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
1058
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001059 return LstmNoCifgNoPeepholeNoProjectionTestImpl(
1060 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001061}
1062
// Merges a 2-channel and a 1-channel 3D tensor along the channel axis into a
// 3-channel output, then checks the concatenated result.
// NOTE(review): memoryManager is unused in this body — kept for signature
// uniformity with the other layer tests.
LayerTestResult<float,3> MergerTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output shape: channels x height x width.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    // First input contributes the first two channels of the output.
    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    // Second input contributes the last channel of the output.
    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    // Expected result: input1's two channels followed by input2's single channel.
    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
    {
        1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f,
        7.0f, 8.0f, 9.0f,
        10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f,
        16.0f, 17.0f, 18.0f,

        19.0f, 20.0f, 21.0f,
        22.0f, 23.0f, 24.0f,
        25.0f, 26.0f, 27.0f,
        28.0f, 29.0f, 30.0f,
        31.0f, 32.0f, 33.0f,
        34.0f, 35.0f, 36.0f,

        37.0f, 38.0f, 39.0f,
        40.0f, 41.0f, 42.0f,
        43.0f, 44.0f, 45.0f,
        46.0f, 47.0f, 48.0f,
        49.0f, 50.0f, 51.0f,
        52.0f, 53.0f, 54.0f,
    })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
    {
        1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f,
        7.0f, 8.0f, 9.0f,
        10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f,
        16.0f, 17.0f, 18.0f,

        19.0f, 20.0f, 21.0f,
        22.0f, 23.0f, 24.0f,
        25.0f, 26.0f, 27.0f,
        28.0f, 29.0f, 30.0f,
        31.0f, 32.0f, 33.0f,
        34.0f, 35.0f, 36.0f,
    })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
    {
        37.0f, 38.0f, 39.0f,
        40.0f, 41.0f, 42.0f,
        43.0f, 44.0f, 45.0f,
        46.0f, 47.0f, 48.0f,
        49.0f, 50.0f, 51.0f,
        52.0f, 53.0f, 54.0f,
    })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    // Second view starts at channel 2, i.e. right after input1's two channels.
    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // When the backend supports sub-tensors, the inputs are created as views
    // directly into the output tensor; otherwise they are standalone tensors.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    // Build the merger workload descriptor: two inputs, one output, two view origins.
    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
1184
// Element-wise addition of two 2x2x2x3 Float32 tensors, checked against a
// precomputed sum.
// NOTE(review): memoryManager is unused in this body — kept for signature
// uniformity with the other layer tests.
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2;
    unsigned int channels  = 2;
    unsigned int height    = 2;
    unsigned int width     = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    // Both inputs and the output share the same NCHW shape.
    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    // Expected output: element-wise sum of input1 and input2.
    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the addition workload: two inputs, one output.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1275
// Element-wise addition with broadcasting: a {1,3,2,1} tensor is added to a
// {1,1,2,3} tensor, producing a {1,3,2,3} result. Templated on the ArmNN data
// type; for quantized types the given scale/offset are applied to all tensors.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    // Shapes differ in the C and W dimensions so both operands are broadcast.
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantization parameters only apply to quantized instantiations.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    // Expected output: each input1 element broadcast-added to each input2 row.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the addition workload: two inputs, one output.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1353
// Shared implementation for the "broadcast a single element" Addition tests.
// Adds a [1,3,2,3] tensor to a [1,1,1,1] tensor holding the single value 0.5f,
// so the scalar is broadcast against every element of the first input.
// ArmnnType selects the data type under test; when T is a quantized type the
// given qScale/qOffset are applied to both inputs and the output.
// memoryManager is unused here but kept so all layer tests share a signature.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantization parameters are only meaningful for quantized data types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    // The single broadcast element.
    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    // Expected output: every element of input1 shifted by 0.5.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The descriptor must reference the tensor handles before the workload is created.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Backing memory must exist before data can be copied in or out.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1426
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001427LayerTestResult<float, 4> AdditionBroadcastTest(
1428 armnn::IWorkloadFactory& workloadFactory,
1429 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001430{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001431 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
1432 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001433}
1434
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001435LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1436 armnn::IWorkloadFactory& workloadFactory,
1437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001438{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001439 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
1440 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001441}
1442
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001443LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1444 armnn::IWorkloadFactory& workloadFactory,
1445 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001446{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001447 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
1448 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001449}
1450
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001451LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1452 armnn::IWorkloadFactory& workloadFactory,
1453 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001454{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001455 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
1456 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001457}
1458
// Runs the same Addition workload, fed with fixed-seed random data, through
// both the backend under test (workloadFactory) and a reference backend
// (refWorkloadFactory). The backend result is returned in ret.output and the
// reference result in ret.outputExpected so the caller can compare them.
// memoryManager is unused here but kept so all layer tests share a signature.
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // Fixed seeds (1232, 456) keep the random inputs reproducible across runs.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // One set of tensor handles per factory: the handles must come from the
    // factory whose workload will use them.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Clone the descriptor for the reference backend, then swap in the
    // reference factory's handles.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
1526
namespace {
// Builds and executes a Division workload for the given input/output shapes,
// values and quantization parameters, returning the actual result in
// result.output and the caller-supplied expectation in result.outputExpected.
// T selects the data type: uint8_t maps to QuantisedAsymm8, anything else is
// treated as Float32.
// memoryManager is unused here but kept so all layer tests share a signature.
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Map the C++ element type onto the corresponding armnn data type.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    // Each tensor carries its own quantization parameters.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The descriptor must reference the tensor handles before the workload is created.
    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    // Backing memory must exist before data can be copied in or out.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
1594
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001595LayerTestResult<float,4> DivisionByZeroTest(
1596 armnn::IWorkloadFactory& workloadFactory,
1597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001598{
1599 const unsigned int width = 2;
1600 const unsigned int height = 2;
1601 const unsigned int channelCount = 2;
1602 const unsigned int batchSize = 2;
1603
1604 unsigned int shape[] = { batchSize, channelCount, height, width };
1605
1606 std::vector<float> input0({
1607 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1608 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1609
1610 std::vector<float> input1({
1611 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1612 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1613
1614 std::vector<float> output({
1615 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1616 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1617
David Beck5cd01f32018-09-12 16:00:08 +01001618 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001619 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001620 shape, input0, 1.0f, 0,
1621 shape, input1, 1.0f, 0,
1622 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001623}
1624
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001625LayerTestResult<float,4> DivisionTest(
1626 armnn::IWorkloadFactory& workloadFactory,
1627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001628{
1629 const unsigned int width = 2;
1630 const unsigned int height = 2;
1631 const unsigned int channelCount = 2;
1632 const unsigned int batchSize = 2;
1633
1634 unsigned int shape[] = { batchSize, channelCount, height, width };
1635
1636 std::vector<float> input0({
1637 2, 2, 2, 2, 3, 3, 3, 3,
1638 4, 4, 4, 4, 5, 5, 5, 5 });
1639
1640 std::vector<float> input1({
1641 1, 1, 1, 1, 2, 2, 2, 2,
1642 4, 4, 4, 4, 4, 4, 4, 4 });
1643
1644 std::vector<float> output({
1645 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1646 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1647
David Beck5cd01f32018-09-12 16:00:08 +01001648
1649 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001650 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001651 shape, input0, 1.0f, 0,
1652 shape, input1, 1.0f, 0,
1653 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001654}
1655
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001656LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
1657 armnn::IWorkloadFactory& workloadFactory,
1658 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001659{
1660 unsigned int shape0[] = { 1, 2, 2, 2 };
1661 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1662
1663 unsigned int shape1[] = { 1, 1, 1, 1 };
1664 std::vector<float> input1({ 2 });
1665
1666 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1667
David Beck5cd01f32018-09-12 16:00:08 +01001668
1669 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001670 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001671 shape0, input0, 1.0f, 0,
1672 shape1, input1, 1.0f, 0,
1673 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001674}
1675
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001676LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
1677 armnn::IWorkloadFactory& workloadFactory,
1678 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001679{
1680 unsigned int shape0[] = { 1, 3, 3, 2 };
1681 std::vector<float> input0({
1682 1, 4, 3, 8, 5, 12,
1683 7, 16, 9, 20, 11, 24,
1684 13, 28, 15, 32, 17, 36});
1685
1686 unsigned int shape1[] = { 1, 1, 1, 2 };
1687 std::vector<float> input1({ 1, 2 });
1688
1689 std::vector<float> output({
1690 1, 2, 3, 4, 5, 6,
1691 7, 8, 9, 10, 11, 12,
1692 13, 14, 15, 16, 17, 18});
1693
David Beck5cd01f32018-09-12 16:00:08 +01001694 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001695 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001696 shape0, input0, 1.0f, 0,
1697 shape1, input1, 1.0f, 0,
1698 shape0, output, 1.0f, 0);
1699}
1700
1701
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001702LayerTestResult<uint8_t,4> DivisionUint8Test(
1703 armnn::IWorkloadFactory& workloadFactory,
1704 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001705{
1706 const unsigned int width = 2;
1707 const unsigned int height = 2;
1708 const unsigned int channelCount = 2;
1709 const unsigned int batchSize = 2;
1710
1711 unsigned int shape[] = { batchSize, channelCount, height, width };
1712
1713 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1714 4, 4, 4, 4, 5, 5, 5, 5 });
1715
1716 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1717 4, 4, 4, 4, 4, 4, 4, 4 });
1718
1719 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1720 4, 4, 4, 4, 5, 5, 5, 5});
1721
1722
1723 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001724 memoryManager,
1725 shape, input0, 1.0f, 0,
1726 shape, input1, 1.0f, 0,
1727 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001728}
1729
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001730LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
1731 armnn::IWorkloadFactory& workloadFactory,
1732 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001733{
1734 unsigned int shape0[] = { 1, 2, 2, 2 };
1735 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1736
1737 unsigned int shape1[] = { 1, 1, 1, 1 };
1738 std::vector<uint8_t> input1({ 2 });
1739
1740 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1741
1742 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001743 memoryManager,
1744 shape0, input0, 1.0f, 0,
1745 shape1, input1, 1.0f, 0,
1746 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001747}
1748
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001749LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
1750 armnn::IWorkloadFactory& workloadFactory,
1751 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001752{
1753 unsigned int shape0[] = { 1, 3, 3, 2 };
1754 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1755 7, 16, 9, 20, 11, 24,
1756 13, 28, 15, 32, 17, 36});
1757
1758 unsigned int shape1[] = { 1, 1, 1, 2 };
1759 std::vector<uint8_t> input1({ 1, 2 });
1760
1761 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1762 7, 8, 9, 10, 11, 12,
1763 13, 14, 15, 16, 17, 18});
1764
1765 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001766 memoryManager,
1767 shape0, input0, 1.0f, 0,
1768 shape1, input1, 1.0f, 0,
1769 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001770}
1771
Éanna Ó Catháinde705582018-12-03 13:04:22 +00001772template<typename DescriptorType>
1773std::unique_ptr<armnn::IWorkload> CreateWorkload(
1774 const armnn::IWorkloadFactory& workloadFactory,
1775 const armnn::WorkloadInfo& info,
1776 const DescriptorType& descriptor)
1777{
1778 return CreateWorkload(workloadFactory, info, descriptor);
1779};
1780
1781template<>
1782std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
1783 const armnn::IWorkloadFactory& workloadFactory,
1784 const armnn::WorkloadInfo& info,
1785 const armnn::MaximumQueueDescriptor& descriptor)
1786{
1787 return workloadFactory.CreateMaximum(descriptor, info);
1788}
1789
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00001790template<>
1791std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
1792 const armnn::IWorkloadFactory& workloadFactory,
1793 const armnn::WorkloadInfo& info,
1794 const armnn::MinimumQueueDescriptor& descriptor)
1795{
1796 return workloadFactory.CreateMinimum(descriptor, info);
1797}
1798
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001799template<>
1800std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
1801 const armnn::IWorkloadFactory& workloadFactory,
1802 const armnn::WorkloadInfo& info,
1803 const armnn::EqualQueueDescriptor& descriptor)
1804{
1805 return workloadFactory.CreateEqual(descriptor, info);
1806}
1807
FrancisMurtagh878f0232018-12-19 10:56:15 +00001808template<>
1809std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
1810 const armnn::IWorkloadFactory& workloadFactory,
1811 const armnn::WorkloadInfo& info,
1812 const armnn::GreaterQueueDescriptor& descriptor)
1813{
1814 return workloadFactory.CreateGreater(descriptor, info);
1815}
1816
namespace {

// Generic driver for the binary elementwise tests (Equal, Greater, Maximum,
// Minimum, ...). Builds a workload via the CreateWorkload<Descriptor>
// specializations above, executes it on the given inputs and returns both the
// actual output and the caller-supplied expectation. Input and output data
// types may differ (e.g. Float32 inputs producing a Boolean comparison
// result). When TInput is quantized, qScale/qOffset are applied to both
// inputs and the output.
template <typename Descriptor,
          armnn::DataType ArmnnTypeInput,
          armnn::DataType ArmnnTypeOutput,
          typename TInput = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<TInput> values0,
    const unsigned int shape1[4], std::vector<TInput> values1,
    const unsigned int outShape[4], std::vector<TOutput> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};

    auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);

    // Quantization parameters are only meaningful for quantized input types.
    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo0.SetQuantizationScale(qScale);
        inputTensorInfo0.SetQuantizationOffset(qOffset);

        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);

        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<TOutput,4> ret(outputTensorInfo);

    // Boolean outputs are compared as 0/non-zero rather than for exact equality.
    if(ArmnnTypeOutput == armnn::DataType::Boolean)
    {
        ret.compareBoolean = true;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The descriptor must reference the tensor handles before the workload is created.
    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    // Backing memory must exist before data can be copied in or out.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
    return ret;
}

// Convenience overload for the common case where input and output share the
// same data type; forwards to the two-type helper above.
template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
LayerTestResult<T, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<T> values0,
    const unsigned int shape1[4], std::vector<T> values1,
    const unsigned int outShape[4], std::vector<T> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
        (workloadFactory,
         memoryManager,
         shape0,
         values0,
         shape1,
         values1,
         outShape,
         outValues,
         qScale,
         qOffset);
}
}
1907
1908LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
1909 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001910{
1911 const unsigned int width = 2;
1912 const unsigned int height = 2;
1913 const unsigned int channelCount = 2;
1914 const unsigned int batchSize = 2;
1915
1916 unsigned int shape[] = { batchSize, channelCount, height, width };
1917
1918 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
1919 3, 3, 3, 3, 4, 4, 4, 4 });
1920
1921 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
1922 5, 5, 5, 5, 4, 4, 4, 4 });
1923
kevmay012b4d88e2019-01-24 14:05:09 +00001924 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
1925 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001926
kevmay012b4d88e2019-01-24 14:05:09 +00001927 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001928 workloadFactory,
1929 memoryManager,
1930 shape,
1931 input0,
1932 shape,
1933 input1,
1934 shape,
1935 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001936}
1937
kevmay012b4d88e2019-01-24 14:05:09 +00001938LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001939 armnn::IWorkloadFactory& workloadFactory,
1940 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1941{
1942 unsigned int shape0[] = { 1, 2, 2, 2 };
1943 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1944
1945 unsigned int shape1[] = { 1, 1, 1, 1 };
1946 std::vector<float> input1({ 1 });
1947
kevmay012b4d88e2019-01-24 14:05:09 +00001948 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001949
kevmay012b4d88e2019-01-24 14:05:09 +00001950 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001951 workloadFactory,
1952 memoryManager,
1953 shape0,
1954 input0,
1955 shape1,
1956 input1,
1957 shape0,
1958 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001959}
1960
kevmay012b4d88e2019-01-24 14:05:09 +00001961LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001962 armnn::IWorkloadFactory& workloadFactory,
1963 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1964{
1965 const unsigned int shape0[] = { 1, 2, 2, 3 };
1966 const unsigned int shape1[] = { 1, 1, 1, 3 };
1967
1968 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
1969 7, 8, 9, 10, 11, 12 });
1970
1971 std::vector<float> input1({ 1, 2, 3});
1972
kevmay012b4d88e2019-01-24 14:05:09 +00001973 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
1974 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001975
kevmay012b4d88e2019-01-24 14:05:09 +00001976 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001977 workloadFactory,
1978 memoryManager,
1979 shape0,
1980 input0,
1981 shape1,
1982 input1,
1983 shape0,
1984 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001985}
1986
1987LayerTestResult<uint8_t, 4> EqualUint8Test(
1988 armnn::IWorkloadFactory& workloadFactory,
1989 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1990{
1991 unsigned int shape[] = { 2, 2, 2, 2 };
1992
1993 // See dequantized values to the right.
1994 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00001995 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001996
1997 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
1998 3, 3, 3, 3, 5, 5, 5, 5 });
1999
2000 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2001 1, 1, 1, 1, 0, 0, 0, 0 });
2002
kevmay012b4d88e2019-01-24 14:05:09 +00002003 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2004 armnn::DataType::QuantisedAsymm8,
2005 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002006 workloadFactory,
2007 memoryManager,
2008 shape,
2009 input0,
2010 shape,
2011 input1,
2012 shape,
2013 output,
2014 1.0f,
2015 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002016}
2017
2018LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
2019 armnn::IWorkloadFactory& workloadFactory,
2020 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2021{
2022 const unsigned int shape0[] = { 1, 2, 2, 3 };
2023 const unsigned int shape1[] = { 1, 1, 1, 1 };
2024
2025 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2026 7, 8, 9, 10, 11, 12 });
2027
2028 std::vector<uint8_t> input1({ 1 });
2029
2030 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
2031 0, 0, 0, 0, 0, 0 });
2032
kevmay012b4d88e2019-01-24 14:05:09 +00002033 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2034 armnn::DataType::QuantisedAsymm8,
2035 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002036 workloadFactory,
2037 memoryManager,
2038 shape0,
2039 input0,
2040 shape1,
2041 input1,
2042 shape0,
2043 output,
2044 1.0f,
2045 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002046}
2047
2048LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
2049 armnn::IWorkloadFactory& workloadFactory,
2050 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2051{
2052 const unsigned int shape0[] = { 1, 2, 2, 3 };
2053 const unsigned int shape1[] = { 1, 1, 1, 3 };
2054
2055 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2056 7, 8, 9, 10, 11, 12 });
2057
2058 std::vector<uint8_t> input1({ 1, 1, 3});
2059
2060 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
2061 0, 0, 0, 0, 0, 0 });
2062
kevmay012b4d88e2019-01-24 14:05:09 +00002063 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2064 armnn::DataType::QuantisedAsymm8,
2065 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002066 workloadFactory,
2067 memoryManager,
2068 shape0,
2069 input0,
2070 shape1,
2071 input1,
2072 shape0,
2073 output,
2074 1.0f,
2075 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002076}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002077
kevmay012b4d88e2019-01-24 14:05:09 +00002078LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00002079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2080{
2081 const unsigned int width = 2;
2082 const unsigned int height = 2;
2083 const unsigned int channelCount = 2;
2084 const unsigned int batchSize = 2;
2085
2086 unsigned int shape[] = { batchSize, channelCount, height, width };
2087
2088 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2089 3, 3, 3, 3, 4, 4, 4, 4 });
2090
2091 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2092 5, 5, 5, 5, 4, 4, 4, 4 });
2093
kevmay012b4d88e2019-01-24 14:05:09 +00002094 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2095 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002096
kevmay012b4d88e2019-01-24 14:05:09 +00002097 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002098 workloadFactory,
2099 memoryManager,
2100 shape,
2101 input0,
2102 shape,
2103 input1,
2104 shape,
2105 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002106}
2107
kevmay012b4d88e2019-01-24 14:05:09 +00002108LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002109 armnn::IWorkloadFactory& workloadFactory,
2110 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2111{
2112 unsigned int shape0[] = { 1, 2, 2, 2 };
2113 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2114
2115 unsigned int shape1[] = { 1, 1, 1, 1 };
2116 std::vector<float> input1({ 1 });
2117
kevmay012b4d88e2019-01-24 14:05:09 +00002118 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00002119
kevmay012b4d88e2019-01-24 14:05:09 +00002120 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002121 workloadFactory,
2122 memoryManager,
2123 shape0,
2124 input0,
2125 shape1,
2126 input1,
2127 shape0,
2128 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002129}
2130
kevmay012b4d88e2019-01-24 14:05:09 +00002131LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002132 armnn::IWorkloadFactory& workloadFactory,
2133 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2134{
2135 const unsigned int shape0[] = { 1, 2, 2, 3 };
2136 const unsigned int shape1[] = { 1, 1, 1, 3 };
2137
2138 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
2139 7, 8, 9, 10, 11, 12 });
2140
2141 std::vector<float> input1({ 1, 3, 2});
2142
kevmay012b4d88e2019-01-24 14:05:09 +00002143 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
2144 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002145
kevmay012b4d88e2019-01-24 14:05:09 +00002146 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002147 workloadFactory,
2148 memoryManager,
2149 shape0,
2150 input0,
2151 shape1,
2152 input1,
2153 shape0,
2154 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002155}
2156
2157LayerTestResult<uint8_t, 4> GreaterUint8Test(
2158 armnn::IWorkloadFactory& workloadFactory,
2159 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2160{
2161 unsigned int shape[] = { 2, 2, 2, 2 };
2162
2163 // See dequantized values to the right.
2164 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2165 3, 3, 3, 3, 5, 5, 5, 5 });
2166
2167 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2168 2, 2, 2, 2, 5, 5, 5, 5 });
2169
2170 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
2171 1, 1, 1, 1, 0, 0, 0, 0 });
2172
kevmay012b4d88e2019-01-24 14:05:09 +00002173 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2174 armnn::DataType::QuantisedAsymm8,
2175 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002176 workloadFactory,
2177 memoryManager,
2178 shape,
2179 input0,
2180 shape,
2181 input1,
2182 shape,
2183 output,
2184 1.0f,
2185 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002186}
2187
2188LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
2189 armnn::IWorkloadFactory& workloadFactory,
2190 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2191{
2192 const unsigned int shape0[] = { 1, 2, 2, 3 };
2193 const unsigned int shape1[] = { 1, 1, 1, 1 };
2194
2195 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2196 7, 8, 9, 10, 11, 12 });
2197
2198 std::vector<uint8_t> input1({ 1 });
2199
2200 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
2201 1, 1, 1, 1, 1, 1 });
2202
kevmay012b4d88e2019-01-24 14:05:09 +00002203 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2204 armnn::DataType::QuantisedAsymm8,
2205 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002206 workloadFactory,
2207 memoryManager,
2208 shape0,
2209 input0,
2210 shape1,
2211 input1,
2212 shape0,
2213 output,
2214 1.0f,
2215 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002216}
2217
2218LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
2219 armnn::IWorkloadFactory& workloadFactory,
2220 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2221{
2222 const unsigned int shape0[] = { 1, 2, 2, 3 };
2223 const unsigned int shape1[] = { 1, 1, 1, 3 };
2224
2225 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2226 7, 8, 9, 10, 11, 12 });
2227
2228 std::vector<uint8_t> input1({ 1, 1, 3});
2229
2230 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
2231 1, 1, 1, 1, 1, 1 });
2232
kevmay012b4d88e2019-01-24 14:05:09 +00002233 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2234 armnn::DataType::QuantisedAsymm8,
2235 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002236 workloadFactory,
2237 memoryManager,
2238 shape0,
2239 input0,
2240 shape1,
2241 input1,
2242 shape0,
2243 output,
2244 1.0f,
2245 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002246}
2247
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002248LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2249 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2250{
2251 const unsigned int width = 2;
2252 const unsigned int height = 2;
2253 const unsigned int channelCount = 2;
2254 const unsigned int batchSize = 2;
2255
2256 unsigned int shape[] = { batchSize, channelCount, height, width };
2257
2258 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2259 3, 3, 3, 3, 4, 4, 4, 4 });
2260
2261 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2262 4, 4, 4, 4, 5, 5, 5, 5 });
2263
2264 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
2265 4, 4, 4, 4, 5, 5, 5, 5 });
2266
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002267 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2268 workloadFactory,
2269 memoryManager,
2270 shape,
2271 input0,
2272 shape,
2273 input1,
2274 shape,
2275 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002276}
2277
2278LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
2279 armnn::IWorkloadFactory& workloadFactory,
2280 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2281{
2282 unsigned int shape0[] = { 1, 2, 2, 2 };
2283 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2284
2285 unsigned int shape1[] = { 1, 1, 1, 1 };
2286 std::vector<float> input1({ 2 });
2287
2288 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
2289
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002290 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2291 workloadFactory,
2292 memoryManager,
2293 shape0,
2294 input0,
2295 shape1,
2296 input1,
2297 shape0,
2298 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002299}
2300
2301LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
2302 armnn::IWorkloadFactory& workloadFactory,
2303 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2304{
2305 const unsigned int shape0[] = { 1, 2, 2, 3 };
2306 const unsigned int shape1[] = { 1, 1, 1, 3 };
2307
2308 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2309 7, 8, 9, 10, 11, 12 });
2310
2311 std::vector<float> input1({ 1, 2, 3});
2312
2313 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002314 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002315
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002316 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2317 workloadFactory,
2318 memoryManager,
2319 shape0,
2320 input0,
2321 shape1,
2322 input1,
2323 shape0,
2324 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002325}
2326
2327LayerTestResult<uint8_t, 4> MaximumUint8Test(
2328 armnn::IWorkloadFactory& workloadFactory,
2329 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2330{
2331 unsigned int shape[] = { 2, 2, 2, 2 };
2332
2333 // See dequantized values to the right.
2334 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2335 3, 3, 3, 3, 4, 4, 4, 4 });
2336
2337 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2338 4, 4, 4, 4, 5, 5, 5, 5 });
2339
2340 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2341 4, 4, 4, 4, 5, 5, 5, 5 });
2342
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002343 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2344 workloadFactory,
2345 memoryManager,
2346 shape,
2347 input0,
2348 shape,
2349 input1,
2350 shape,
2351 output,
2352 1.0f,
2353 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002354}
2355
2356LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
2357 armnn::IWorkloadFactory& workloadFactory,
2358 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2359{
2360 const unsigned int shape0[] = { 1, 2, 2, 3 };
2361 const unsigned int shape1[] = { 1, 1, 1, 1 };
2362
2363 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2364 7, 8, 9, 10, 11, 12 });
2365
2366 std::vector<uint8_t> input1({2});
2367
2368 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
2369 7, 8, 9, 10, 11, 12 });
2370
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002371 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2372 workloadFactory,
2373 memoryManager,
2374 shape0,
2375 input0,
2376 shape1,
2377 input1,
2378 shape0,
2379 output,
2380 1.0f,
2381 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002382}
2383
2384LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
2385 armnn::IWorkloadFactory& workloadFactory,
2386 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2387{
2388 const unsigned int shape0[] = { 1, 2, 2, 3 };
2389 const unsigned int shape1[] = { 1, 1, 1, 3 };
2390
2391 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2392 7, 8, 9, 10, 11, 12 });
2393
2394 std::vector<uint8_t> input1({ 1, 10, 3});
2395
2396 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
2397 7, 10, 9, 10, 11, 12 });
2398
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002399 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2400 workloadFactory,
2401 memoryManager,
2402 shape0,
2403 input0,
2404 shape1,
2405 input1,
2406 shape0,
2407 output,
2408 1.0f,
2409 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002410}
2411
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002412LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
2413 armnn::IWorkloadFactory& workloadFactory,
2414 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2415{
2416 unsigned int shape0[] = { 1, 2, 2, 2 };
2417 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2418
2419 unsigned int shape1[] = { 1, 1, 1, 1 };
2420 std::vector<float> input1({ 2 });
2421
2422 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
2423
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002424 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2425 workloadFactory,
2426 memoryManager,
2427 shape0,
2428 input0,
2429 shape1,
2430 input1,
2431 shape0,
2432 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002433}
2434
2435
2436LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
2437 armnn::IWorkloadFactory& workloadFactory,
2438 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2439{
2440 unsigned int shape0[] = { 1, 2, 2, 2 };
2441 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
2442
2443 unsigned int shape1[] = { 1, 1, 1, 1 };
2444 std::vector<float> input1({ 5 });
2445
2446 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
2447
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002448 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2449 workloadFactory,
2450 memoryManager,
2451 shape0,
2452 input0,
2453 shape1,
2454 input1,
2455 shape0,
2456 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002457}
2458
2459LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
2460 armnn::IWorkloadFactory & workloadFactory,
2461 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
2462{
2463 const unsigned int shape0[] = { 1, 2, 2, 3 };
2464 const unsigned int shape1[] = { 1, 1, 1, 3 };
2465
2466 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
2467 7, 1, 2, 3, 4, 5 });
2468
2469 std::vector<uint8_t> input1({ 1, 2, 3});
2470
2471 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
2472 1, 1, 2, 1, 2, 3 });
2473
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002474 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2475 workloadFactory,
2476 memoryManager,
2477 shape0,
2478 input0,
2479 shape1,
2480 input1,
2481 shape0,
2482 output,
2483 1.0f,
2484 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002485}
2486
namespace {
// Runs a Multiplication workload on the given factory with explicit input and
// output shapes/values, and returns a LayerTestResult holding both the actual
// output and the caller-supplied expected output.
// NOTE(review): memoryManager is unused here; presumably kept so the signature
// matches the other layer-test helpers - confirm before removing.
LayerTestResult<float,4> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<float> & values0,
    const unsigned int shape1[4],
    const std::vector<float> & values1,
    const unsigned int outShape[4],
    const std::vector<float> & outValues)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Backend-specific tensor handles for both inputs and the output.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire the handles into the queue descriptor before creating the workload.
    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Handles must be allocated before any data is copied into them.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // The expected output is taken verbatim from the caller.
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace
2535
2536
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002537LayerTestResult<float,4> MultiplicationTest(
2538 armnn::IWorkloadFactory& workloadFactory,
2539 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002540{
2541 const unsigned int width = 2;
2542 const unsigned int height = 2;
2543 const unsigned int channelCount = 2;
2544 const unsigned int batchSize = 2;
2545
2546 unsigned int shape[] = { batchSize, channelCount, height, width };
2547
2548 std::vector<float> input0({
2549 1, 1, 1, 1, 2, 2, 2, 2,
2550 3, 3, 3, 3, 4, 4, 4, 4 });
2551
2552 std::vector<float> input1({
2553 2, 2, 2, 2, 3, 3, 3, 3,
2554 4, 4, 4, 4, 5, 5, 5, 5 });
2555
2556 std::vector<float> output({
2557 2, 2, 2, 2, 6, 6, 6, 6,
2558 12, 12, 12, 12, 20, 20, 20, 20 });
2559
2560 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002561 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002562 shape,
2563 input0,
2564 shape,
2565 input1,
2566 shape,
2567 output);
2568}
2569
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002570LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
2571 armnn::IWorkloadFactory& workloadFactory,
2572 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002573{
2574 unsigned int shape0[] = { 1, 2, 2, 2 };
2575 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2576
2577 unsigned int shape1[] = { 1, 1, 1, 1 };
2578 std::vector<float> input1({ 2 });
2579
2580 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
2581
2582 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002583 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002584 shape0,
2585 input0,
2586 shape1,
2587 input1,
2588 shape0,
2589 output);
2590}
2591
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002592LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
2593 armnn::IWorkloadFactory& workloadFactory,
2594 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002595{
2596 unsigned int shape0[] = { 1, 3, 3, 2 };
2597 std::vector<float> input0({
2598 1, 2, 3, 4, 5, 6,
2599 7, 8, 9, 10, 11, 12,
2600 13, 14, 15, 16, 17, 18});
2601
2602 unsigned int shape1[] = { 1, 1, 1, 2 };
2603 std::vector<float> input1({ 1, 2 });
2604
2605 std::vector<float> output({
2606 1, 4, 3, 8, 5, 12,
2607 7, 16, 9, 20, 11, 24,
2608 13, 28, 15, 32, 17, 36});
2609
2610 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002611 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002612 shape0,
2613 input0,
2614 shape1,
2615 input1,
2616 shape0,
2617 output);
2618}
telsoa014fcda012018-03-09 14:13:49 +00002619
// Runs the same Multiplication workload on workloadFactory and on
// refWorkloadFactory with identical random inputs, and returns a result whose
// 'output' comes from the backend under test and whose 'outputExpected' comes
// from the reference backend, so the caller can compare the two.
// NOTE(review): memoryManager is unused here; presumably kept for signature
// consistency with the other layer tests - confirm before removing.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the comparison deterministic across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    // Handles for the backend under test.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Handles for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference descriptor is a copy with its handles swapped for the
    // reference backend's handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    // Allocate all handles before copying any data in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
2688
// Runs the same BatchNormalization workload on workloadFactory and on
// refWorkloadFactory with identical random inputs and parameters, returning
// the backend-under-test output in 'output' and the reference backend output
// in 'outputExpected' for the caller to compare.
// NOTE(review): memoryManager is unused here; presumably kept for signature
// consistency with the other layer tests - confirm before removing.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;          // per-channel parameter tensors (mean etc.)

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds keep the comparison deterministic across runs.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    // Variance uses a minimum of 0.0f so it cannot be negative.
    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference descriptor shares the parameter tensors but swaps the
    // input/output handles for the reference backend's handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    // Allocate all handles before copying any data in.
    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
2769
// Runs a Permute workload to reorder inputData according to 'mappings',
// writing the permuted values into outputData.
// NOTE: inputTensorInfo is an in/out parameter - on return it is overwritten
// with the permuted tensor's info. memoryManager is unused here; presumably
// kept for signature consistency with the other helpers - confirm.
template<typename T>
void PermuteTensorData(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::PermutationVector& mappings,
    armnn::TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    // Shape/layout of the tensor after applying the permutation.
    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    // Handles must be allocated before data is copied in.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Report the permuted info back to the caller.
    inputTensorInfo = outputTensorInfo;
}
2812
2813armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
2814 const std::vector<armnn::TensorInfo> & inputTensorInfos,
2815 unsigned int concatDim)
2816{
telsoa014fcda012018-03-09 14:13:49 +00002817 std::vector<armnn::TensorShape> shapes;
2818 shapes.reserve(inputTensorInfos.size());
2819 for (const armnn::TensorInfo& it: inputTensorInfos)
2820 {
2821 shapes.push_back(it.GetShape());
2822 }
surmeh013537c2c2018-05-18 16:31:43 +01002823
2824 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
2825 shapes.end(),
2826 concatDim);
2827}
2828
//
// Concatenation is only supported for N and C dimensions for NCHW, and for the innermost dimension.
// For tensors with fewer than 4 dimensions we need to make sure that the concatenation dimension
// is at least the 3rd slowest iterating one, or the innermost dimension.
//
2834
// Decides whether the inputs must be permuted before concatenation can be
// handed to the backend (see the restriction note above this function).
// All inputs are required to have the same rank; concatDim is the axis the
// caller wants to concatenate along.
bool NeedPermuteForConcat(
    const std::vector<armnn::TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            // First tensor fixes the expected rank.
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
2860
2861armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
2862{
2863 unsigned int numDims = inputShape.GetNumDimensions();
2864 if (numDims >= 3)
2865 {
2866 // Nothing to do if the inputShape has at least 3 dimensions.
2867 return inputShape;
2868 }
2869
2870 std::vector<unsigned int> newDims(size_t(3), 1u);
2871 unsigned int expandedBy = 3 - numDims;
2872 for (unsigned int i=0; i<numDims; ++i)
2873 {
2874 newDims[expandedBy+i] = inputShape[i];
2875 }
2876 return armnn::TensorShape(3u, &newDims[0]);
2877}
2878
2879void Generate3dPermuteVectorForConcat(
2880 unsigned int numDimensions,
2881 unsigned int & concatDim,
2882 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
2883{
2884 BOOST_ASSERT_MSG(numDimensions <= 3,
2885 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01002886 unsigned int expandedBy = 3 - numDimensions;
2887 unsigned int expandedConcatAxis = concatDim + expandedBy;
2888
2889 if (expandedConcatAxis == 2)
2890 {
2891 concatDim = 0;
2892 armnn::PermutationVector forwardPermutation({1, 2, 0});
2893 armnn::PermutationVector reversePermutation({2, 0, 1});
2894 permutations = std::make_pair(forwardPermutation, reversePermutation);
2895 }
2896 else if (expandedConcatAxis == 1)
2897 {
2898 concatDim = 0;
2899 armnn::PermutationVector forwardPermutation({2, 0, 1});
2900 armnn::PermutationVector reversePermutation({1, 2, 0});
2901 permutations = std::make_pair(forwardPermutation, reversePermutation);
2902 }
2903 else
2904 {
2905 BOOST_ASSERT(expandedConcatAxis == 0);
2906 concatDim = 0;
2907 }
2908}
2909
2910//
2911// Permute the input tensors so we can do a supported concatenation.
2912// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
2913// at the front. Finally this function tells what the output shape
2914// of the permuted concatenated tensor is going to be.
2915//
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,   // in/out: replaced with expanded 3d infos
    std::vector<T *> & inputData,                        // in/out: re-pointed at the permuted copies
    std::vector<std::vector<T>> & inputDataStorage,      // out: owns the permuted copies of the data
    armnn::PermutationVector & permuteVector,            // out: reverse permutation, to undo after concat
    unsigned int & concatDim,                            // in/out: rewritten to the post-permute axis
    armnn::TensorInfo & outputTensorInfo)                // in/out: shape expanded to 3d and permuted
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input: derive the permutation pair (and the new concat
            // axis) from its rank. All other inputs must share that rank.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Expand to 3d first so the single 3d permutation also covers
        // 1d and 2d inputs.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        // From here on the caller's pointers refer to the permuted copies.
        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    // The output shape undergoes the same expand + forward permutation
    // as the inputs.
    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
2978
2979
2980//
2981// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01002982// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01002983// output.
2984//
template <typename T>
void PermuteOutputForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo & tensorInfo,                     // info describing the concatenated result
    const armnn::PermutationVector & permuteVector,           // reverse permutation from PermuteInputsForConcat
    std::unique_ptr<armnn::ITensorHandle> && inputDataHandle, // handle holding the raw concatenated output
    T * data)                                                 // destination buffer in the caller's layout
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    // Pull the concatenated result out of the tensor handle first.
    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    // Apply the reverse permutation so the data returns to the layout
    // the caller originally requested.
    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}
3018
3019template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003020void Concatenate(
3021 armnn::IWorkloadFactory& workloadFactory,
3022 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3023 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
3024 std::initializer_list<T *> inputsOrig,
3025 const armnn::TensorInfo& outputTensorInfoOrig,
3026 T * output,
narpra015cdda352018-11-19 15:30:27 +00003027 unsigned int concatDim,
3028 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01003029{
3030 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
3031 if (output == nullptr)
3032 {
3033 // Nullptr is an error in the test. By returning without doing the permutation
3034 // I expect the caller to fail the test. It still makes sense to report this as
3035 // an assert for Debug builds.
3036 return;
3037 }
3038
telsoa01c577f2c2018-08-31 09:22:23 +01003039 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01003040 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
3041 std::vector<T *> inputs = inputsOrig;
3042 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
3043
3044 armnn::PermutationVector permuteVector{0, 1, 2};
3045
telsoa01c577f2c2018-08-31 09:22:23 +01003046 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01003047 std::vector<std::vector<T>> tmpInputDataStorage;
3048
3049 const size_t inputCount = inputTensorInfos.size();
3050
3051 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
3052
3053 if (needPermuteForConcat)
3054 {
3055 //
3056 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01003057 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01003058 //
3059 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003060 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003061 inputTensorInfos,
3062 inputs,
3063 tmpInputDataStorage,
3064 permuteVector,
3065 concatDim,
3066 outputTensorInfo);
3067 }
3068
narpra015cdda352018-11-19 15:30:27 +00003069 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00003070
3071 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
3072 inputHandles.reserve(inputCount);
3073
narpra015cdda352018-11-19 15:30:27 +00003074 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3075
3076 armnn::MergerQueueDescriptor queueDescriptor;
3077 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
3078 queueDescriptor.m_Parameters = viewsDescriptor;
3079
3080 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003081 {
narpra015cdda352018-11-19 15:30:27 +00003082 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
3083 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
3084 {
3085 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
3086 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
3087 }
telsoa014fcda012018-03-09 14:13:49 +00003088
narpra015cdda352018-11-19 15:30:27 +00003089 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00003090
narpra015cdda352018-11-19 15:30:27 +00003091 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
3092 for (unsigned int i = 0; i < inputCount; ++i)
3093 {
3094 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
3095 std::unique_ptr<armnn::ITensorHandle> inputHandle =
3096 subTensorsSupported ?
3097 workloadFactory.CreateSubTensorHandle(*outputHandle,
3098 inputTensorInfo.GetShape(),
3099 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
3100 workloadFactory.CreateTensorHandle(inputTensorInfo);
3101
3102 inputHandles.emplace_back(std::move(inputHandle));
3103 }
3104
telsoa014fcda012018-03-09 14:13:49 +00003105 }
narpra015cdda352018-11-19 15:30:27 +00003106 else
3107 {
3108 for (unsigned int i = 0; i < inputCount; ++i)
3109 {
3110 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
3111 inputHandles.emplace_back(std::move(inputHandle));
3112 }
3113 }
telsoa014fcda012018-03-09 14:13:49 +00003114
3115 for (unsigned int i = 0; i < inputCount; ++i)
3116 {
surmeh013537c2c2018-05-18 16:31:43 +01003117 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00003118 }
3119
3120 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
3121
3122 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
3123
3124 for (auto& inputHandle : inputHandles)
3125 {
3126 inputHandle->Allocate();
3127 }
3128
3129 outputHandle->Allocate();
3130
3131 unsigned int nextInputId = 0;
3132 for (auto& inputHandle : inputHandles)
3133 {
surmeh013537c2c2018-05-18 16:31:43 +01003134 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
3135 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00003136 }
3137
3138 workload->Execute();
3139
surmeh013537c2c2018-05-18 16:31:43 +01003140 if (needPermuteForConcat)
3141 {
3142 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003143 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003144 outputTensorInfo,
3145 permuteVector,
3146 std::move(outputHandle),
3147 output);
3148 }
3149 else
3150 {
3151 CopyDataFromITensorHandle(output, outputHandle.get());
3152 }
telsoa014fcda012018-03-09 14:13:49 +00003153}
3154
// Concatenates three 1d tensors of 3 elements each along dimension 0 and
// checks the 9-element result. qScale/qOffset are the quantization parameters
// applied to all input and expected values.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 1> Concatenation1dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType);

    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));

    armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType);

    LayerTestResult<T, 1> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,     // concat dimension
                   true); // use sub-tensors

    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
    }));

    return result;
}
3189
LayerTestResult<float, 1> Concatenation1dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 variant of the 1d concatenation test (scale 0, offset 0 => no quantization).
    return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
3196
// Shared driver for the 2d concatenation tests: concatenates three fixed 2x3
// inputs along 'dimension' into a tensor described by 'outputTensorInfo'.
// Only result.output is populated here; the caller fills in
// result.outputExpected for the axis being tested.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType);

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true); // use sub-tensors

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}
3247
// Concatenation of three 2x3 tensors along dimension 0 (batch) -> 6x3 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);

    // Inputs stacked whole, one after another, along the batch axis.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3282
LayerTestResult<float, 2> Concatenation2dDim0Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 variant of the 2d dim-0 concatenation test (no quantization).
    return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
3289
// Concatenation of three 2x3 tensors along dimension 1 (within each batch) -> 2x9 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);

    // Each batch row is the concatenation of the corresponding rows of the three inputs.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
    }));

    return result;
}
3312
LayerTestResult<float, 2> Concatenation2dDim1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 variant of the 2d dim-1 concatenation test (no quantization).
    return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
3319
// Concatenation along dimension 0 of inputs whose batch counts differ
// (2x3 + 3x3 + 1x3) -> 6x3 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,     // concat dimension
                   true); // use sub-tensors

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3390
LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 variant of the dim-0, differing-batch-count concatenation test.
    return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}
3398
// Concatenation along dimension 1 of inputs whose widths differ
// (2x3 + 2x5 + 2x1) -> 2x9 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,     // concat dimension
                   true); // use sub-tensors

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3457
LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 variant of the dim-1, differing-width concatenation test.
    return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}
3465
// Shared driver for the 3d concatenation tests: concatenates three fixed
// 2x3x2 inputs along 'dimension' into a tensor described by
// 'outputTensorInfo'. Only result.output is populated here; the caller fills
// in result.outputExpected for the axis being tested. 'useSubtensor' is
// forwarded to Concatenate().
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
3553
// Concatenation of three 2x3x2 tensors along dimension 0 (batch) -> 6x3x2 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    // Inputs stacked whole, one after another, along the batch axis.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
3624
LayerTestResult<float, 3> Concatenation3dDim0Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 variant of the 3d dim-0 concatenation test (no quantization).
    return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
3631
// Concatenation of three 2x3x2 tensors along dimension 1 (channels) -> 2x9x2 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    // Each batch holds the channels of all three inputs back to back.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
3702
LayerTestResult<float, 3> Concatenation3dDim1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 variant of the 3d dim-1 concatenation test (no quantization).
    return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
3709
// Concatenates three 2x3x2 tensors along dimension 2 (the innermost axis) and
// verifies the 2x3x6 result. useSubtensor toggles whether the backend is asked
// to realise the inputs as sub-tensors of the output (forwarded verbatim to
// Concatenation3dTestImpl).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);

    // dimension = 2: each output row is the rows of the three inputs laid end to end.
    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
3745
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003746LayerTestResult<float, 3> Concatenation3dDim2Test(
3747 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00003748 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3749 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003750{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003751 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
3752 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003753}
3754
// Concatenates three tensors of DIFFERENT batch sizes (2x3x2, 1x3x2, 3x3x2)
// along dimension 0, producing a 6x3x2 output. Unlike the equal-shape tests,
// this builds its own inputs and calls the generic Concatenate() driver
// directly (dimension = 0, useSubtensor = true).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        25.0f, 26.0f,

        // Batch 0, Channel 1
        27.0f, 28.0f,

        // Batch 0, Channel 2
        29.0f, 30.0f,

        // Batch 1, Channel 0
        13.0f, 14.0f,

        // Batch 1, Channel 1
        15.0f, 16.0f,

        // Batch 1, Channel 2
        17.0f, 18.0f,

        // Batch 2, Channel 0
        31.0f, 32.0f,

        // Batch 2, Channel 1
        33.0f, 34.0f,

        // Batch 2, Channel 2
        35.0f, 36.0f
    }));

    // Output batch count is the sum of the input batch counts: 2 + 1 + 3 = 6.
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,        // concatenation dimension
                   true);    // useSubtensor

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected result: input0's batches, then input1's, then input2's, in order.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
3897
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003898LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
3899 armnn::IWorkloadFactory& workloadFactory,
3900 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003901{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003902 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
3903 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003904}
3905
// Concatenates three tensors of DIFFERENT channel counts (2x3x2, 2x4x2, 2x1x2)
// along dimension 1, producing a 2x8x2 output. Builds its own inputs and calls
// the generic Concatenate() driver directly (dimension = 1, useSubtensor = true).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    // Output channel count is the sum of the input channel counts: 3 + 4 + 1 = 8.
    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,        // concatenation dimension
                   true);    // useSubtensor

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected result: per batch, input0's channels, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
4036
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004037LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
4038 armnn::IWorkloadFactory& workloadFactory,
4039 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004040{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004041 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4042 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004043}
4044
// Concatenates three tensors with DIFFERENT innermost widths (2x3x2, 2x3x1,
// 2x3x3) along dimension 2, producing a 2x3x6 output. Builds its own inputs
// and calls the generic Concatenate() driver directly.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    // Output inner width is the sum of the input widths: 2 + 1 + 3 = 6.
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,              // concatenation dimension
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected result: per row, input0's pair, input1's single value, input2's triple.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
4152
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004153LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
4154 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004155 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4156 bool useSubtensor)
4157{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004158 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
4159 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004160}
4161
// Shared driver for the equal-shape 4D concatenation tests: concatenates three
// identical 1x3x2x2 inputs (values 1-12, 11-22, 21-32) along the given
// dimension, writing into outputTensorInfo's shape. Callers supply the
// matching expected data; this function only fills result.output.
// @param dimension    axis to concatenate along (0-3)
// @param useSubtensor forwarded to Concatenate()
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f
    }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}
4218
// 4D concatenation along dimension 0: three 1x3x2x2 inputs stacked into a
// 3x3x2x2 output; the expected data is the three inputs back-to-back.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);

    // dimension = 0, useSubtensor = true.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
4255
4256LayerTestResult<float, 4> Concatenation4dDim0Test(
4257 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004258 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004259{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004260 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004261}
4262
// 4D concatenation along dimension 1: three 1x3x2x2 inputs joined into a
// 1x9x2x2 output; with dim-1 concatenation on a single batch, the expected
// data is again simply the three inputs back-to-back.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType);

    // dimension = 1, useSubtensor = true.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4300
4301LayerTestResult<float, 4> Concatenation4dDim1Test(
4302 armnn::IWorkloadFactory& workloadFactory,
4303 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4304{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004305 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004306}
4307
// 4D concatenation along dimension 2: three 1x3x2x2 inputs joined into a
// 1x3x6x2 output; the inputs' rows are interleaved per channel.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType);

    // dimension = 2, useSubtensor = true.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4345
4346LayerTestResult<float, 4> Concatenation4dDim2Test(
4347 armnn::IWorkloadFactory& workloadFactory,
4348 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4349{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004350 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004351}
4352
// 4D concatenation along dimension 3 (innermost): three 1x3x2x2 inputs joined
// into a 1x3x2x6 output; each output row interleaves one pair from each input.
// useSubtensor is exposed because innermost-axis concatenation is the case
// where sub-tensor support differs between backends.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
4391
4392LayerTestResult<float, 4> Concatenation4dDim3Test(
4393 armnn::IWorkloadFactory& workloadFactory,
4394 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4395 bool useSubtensor)
4396{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004397 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
4398 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00004399}
4400
// 4D concatenation along dimension 0 with DIFFERENT batch sizes: a 1x3x2x2
// input followed by a 2x3x2x2 input, producing a 3x3x2x2 output. Builds its
// own inputs and calls the generic Concatenate() driver directly.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    // Output batch count is the sum of the input batch counts: 1 + 2 = 3.
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);    // useSubtensor

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected result: input0's single batch followed by input1's two batches.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4480
4481LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
4482 armnn::IWorkloadFactory& workloadFactory,
4483 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4484{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004485 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
4486 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004487}
4488
// 4D concatenation along dimension 1 with DIFFERENT channel counts: a 1x3x2x2
// input followed by a 1x2x2x2 input, producing a 1x5x2x2 output. Builds its
// own inputs and calls the generic Concatenate() driver directly.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 1;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,

    }));

    // Output channel count is the sum of the input channel counts: 3 + 2 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);    // useSubtensor

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected result: input0's three channels followed by input1's two.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f
    }));

    return result;
}
4549
4550LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
4551 armnn::IWorkloadFactory& workloadFactory,
4552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4553{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004554 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
4555 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004556}
4557
// 4D concatenation along dimension 2 with DIFFERENT heights: a 1x3x2x2 input
// followed by a 1x3x3x2 input, producing a 1x3x5x2 output. Builds its own
// inputs and calls the generic Concatenate() driver directly.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    // Output height is the sum of the input heights: 2 + 3 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);    // useSubtensor

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected result: per channel, input0's two rows then input1's three rows.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
4629
4630LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
4631 armnn::IWorkloadFactory& workloadFactory,
4632 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4633{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004634 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
4635 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004636}
4637
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004638template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004639LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
4640 armnn::IWorkloadFactory& workloadFactory,
4641 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4642 float qScale,
4643 int32_t qOffset,
4644 bool useSubtensor)
4645{
4646 unsigned int dimension = 3;
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004647 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004648
4649 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4650 1.0f, 2.0f,
4651 3.0f, 4.0f,
4652 5.0f, 6.0f,
4653 7.0f, 8.0f,
4654 9.0f, 10.0f,
4655 11.0f, 12.0f
4656 }));
4657
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004658 armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004659
4660 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4661 11.0f, 12.0f, 13.0f,
4662 14.0f, 15.0f, 16.0f,
4663
4664 17.0f, 18.0f, 19.0f,
4665 20.0f, 21.0f, 22.0f,
4666
4667 23.0f, 24.0f, 25.0f,
4668 26.0f, 27.0f, 28.0f
4669 }));
4670
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004671 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004672
4673 LayerTestResult<T, 4> result(outputTensorInfo);
4674
4675 std::vector<T> output;
4676 output.resize(outputTensorInfo.GetNumElements());
4677 Concatenate<T>(workloadFactory,
4678 memoryManager,
4679 {inputTensorInfo0, inputTensorInfo1},
4680 {input0.data(), input1.data()},
4681 outputTensorInfo,
4682 output.data(),
4683 dimension,
4684 useSubtensor);
4685
4686 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4687 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4688 1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
4689 3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
4690 5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
4691 7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
4692 9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
4693 11.0f, 12.0f, 26.0f, 27.0f, 28.0f
4694 }));
4695
4696 return result;
4697}
4698
4699LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
4700 armnn::IWorkloadFactory& workloadFactory,
4701 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4702 bool useSubtensor)
4703{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004704 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
4705 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004706}
4707
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004708LayerTestResult<float, 4> ResizeBilinearNopTest(
4709 armnn::IWorkloadFactory& workloadFactory,
4710 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004711 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004712{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004713 const armnn::TensorInfo inputTensorInfo =
4714 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
4715
4716 const armnn::TensorInfo outputTensorInfo =
4717 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00004718
James Conroy6b965822018-11-01 11:33:09 +00004719 std::vector<float> inputData({
4720 1.0f, 2.0f, 3.0f, 4.0f,
4721 2.0f, 3.0f, 4.0f, 5.0f,
4722 3.0f, 4.0f, 5.0f, 6.0f,
4723 4.0f, 5.0f, 6.0f, 7.0f,
4724
telsoa014fcda012018-03-09 14:13:49 +00004725 1.0f, 2.0f, 3.0f, 4.0f,
4726 2.0f, 3.0f, 4.0f, 5.0f,
4727 3.0f, 4.0f, 5.0f, 6.0f,
4728 4.0f, 5.0f, 6.0f, 7.0f
James Conroy6b965822018-11-01 11:33:09 +00004729 });
4730
4731 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004732 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004733 {
4734 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004735 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004736 inputData = tmp;
4737 }
4738
4739 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004740
4741 LayerTestResult<float, 4> result(outputTensorInfo);
4742 result.outputExpected = input;
4743
4744 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4745 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4746
4747 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004748 descriptor.m_Parameters.m_DataLayout = dataLayout;
4749 armnn::WorkloadInfo info;
4750 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4751 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4752
4753 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4754
4755 inputHandle->Allocate();
4756 outputHandle->Allocate();
4757 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4758
James Conroy074f3712018-10-03 09:32:03 +01004759 workload->Execute();
4760
4761 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4762 return result;
4763}
4764
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004765LayerTestResult<float, 4> SimpleResizeBilinearTest(
4766 armnn::IWorkloadFactory& workloadFactory,
4767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004768 const armnn::DataLayout dataLayout)
James Conroy074f3712018-10-03 09:32:03 +01004769{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004770 const armnn::TensorInfo inputTensorInfo =
4771 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
4772
4773 const armnn::TensorInfo outputTensorInfo =
4774 armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);
James Conroy074f3712018-10-03 09:32:03 +01004775
James Conroy6b965822018-11-01 11:33:09 +00004776 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004777 1.0f, 255.0f,
James Conroy6b965822018-11-01 11:33:09 +00004778 200.0f, 250.0f,
4779
4780 250.0f, 200.0f,
4781 250.0f, 1.0f
4782 });
James Conroy074f3712018-10-03 09:32:03 +01004783
4784 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
4785 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
James Conroy6b965822018-11-01 11:33:09 +00004786 // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
4787 // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
4788 // which we would expect if projecting the centre).
4789
4790 std::vector<float> outputData({
4791 1.0f,
4792
4793 250.0f
4794 });
4795
4796 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004797 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004798 {
4799 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004800 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004801 inputData = tmp;
4802
4803 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004804 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004805 outputData = tmp1;
4806 }
4807
4808 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
4809
James Conroy074f3712018-10-03 09:32:03 +01004810 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004811 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
James Conroy074f3712018-10-03 09:32:03 +01004812
4813 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4814 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4815
4816 armnn::ResizeBilinearQueueDescriptor descriptor;
4817 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004818 armnn::WorkloadInfo info;
4819 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4820 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4821
4822 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4823
4824 inputHandle->Allocate();
4825 outputHandle->Allocate();
4826 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4827
4828 workload->Execute();
4829
4830 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4831 return result;
4832}
4833
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004834LayerTestResult<float, 4> ResizeBilinearSqMinTest(
4835 armnn::IWorkloadFactory& workloadFactory,
4836 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004837 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004838{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004839 const armnn::TensorInfo inputTensorInfo =
4840 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
4841
4842 const armnn::TensorInfo outputTensorInfo =
4843 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00004844
James Conroy6b965822018-11-01 11:33:09 +00004845 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004846 1.0f, 2.0f, 3.0f, 4.0f,
4847 2.0f, 3.0f, 4.0f, 5.0f,
4848 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00004849 4.0f, 5.0f, 6.0f, 7.0f,
4850
4851 7.0f, 6.0f, 5.0f, 4.0f,
4852 6.0f, 5.0f, 4.0f, 3.0f,
4853 5.0f, 4.0f, 3.0f, 2.0f,
4854 4.0f, 3.0f, 2.0f, 1.0f
4855 });
4856
4857 std::vector<float> outputData({
4858 1.0f, 3.0f,
4859 3.0f, 5.0f,
4860
4861 7.0f, 5.0f,
4862 5.0f, 3.0f
4863 });
4864
4865 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004866 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004867 {
4868 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004869 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004870 inputData = tmp;
4871
4872 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004873 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004874 outputData = tmp1;
4875 }
4876
4877 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004878
telsoa014fcda012018-03-09 14:13:49 +00004879 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004880 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004881
4882 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4883 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4884
4885 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004886 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004887 armnn::WorkloadInfo info;
4888 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4889 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4890
4891 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4892
4893 inputHandle->Allocate();
4894 outputHandle->Allocate();
4895 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4896
4897 workload->Execute();
4898
4899 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4900 return result;
4901}
4902
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004903LayerTestResult<float, 4> ResizeBilinearMinTest(
4904 armnn::IWorkloadFactory& workloadFactory,
4905 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004906 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004907{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004908 const armnn::TensorInfo inputTensorInfo =
4909 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
4910
4911 const armnn::TensorInfo outputTensorInfo =
4912 armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00004913
James Conroy6b965822018-11-01 11:33:09 +00004914 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004915 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
4916 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00004917 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
4918
4919 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
4920 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
4921 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
4922 });
4923
4924 std::vector<float> outputData({
4925 1.0f, 2.6666f, 6.00f,
4926 78.5f, 179.3333f, 401.00f,
4927
4928 987.0f, 454.6670f, 203.33f,
4929 48.5f, 22.3333f, 10.00f
4930 });
4931
4932 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004933 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004934 {
4935 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004936 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004937 inputData = tmp;
4938
4939 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004940 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004941 outputData = tmp1;
4942 }
4943
4944 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004945
4946 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004947 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004948
4949 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4950 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4951
4952 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004953 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004954 armnn::WorkloadInfo info;
4955 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4956 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4957
4958 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4959
4960 inputHandle->Allocate();
4961 outputHandle->Allocate();
4962 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4963
4964 workload->Execute();
4965
4966 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4967 return result;
4968}
4969
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004970LayerTestResult<float, 4> ResizeBilinearMagTest(
4971 armnn::IWorkloadFactory& workloadFactory,
4972 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004973 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004974{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004975 const armnn::TensorInfo inputTensorInfo =
4976 armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);
4977
4978 const armnn::TensorInfo outputTensorInfo =
4979 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00004980
James Conroy6b965822018-11-01 11:33:09 +00004981 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004982 1.0f, 2.0f,
4983 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00004984 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00004985
James Conroy6b965822018-11-01 11:33:09 +00004986 233.0f, 144.0f,
4987 21.0f, 13.0f,
4988 2.0f, 1.0f
4989 });
4990
4991 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01004992 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
4993 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00004994 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
4995
4996 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
4997 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
4998 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
4999 });
5000
5001 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005002 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005003 {
5004 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005005 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005006 inputData = tmp;
5007
5008 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005009 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005010 outputData = tmp1;
5011 }
5012
5013 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
5014
5015 LayerTestResult<float, 4> result(outputTensorInfo);
5016 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005017
5018 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5019 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5020
5021 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005022 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005023 armnn::WorkloadInfo info;
5024 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5025 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5026
5027 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5028
5029 inputHandle->Allocate();
5030 outputHandle->Allocate();
5031 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5032
5033 workload->Execute();
5034
5035 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5036 return result;
5037}
5038
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005039LayerTestResult<float, 2> FakeQuantizationTest(
5040 armnn::IWorkloadFactory& workloadFactory,
5041 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005042{
5043 constexpr unsigned int width = 2;
5044 constexpr unsigned int height = 3;
5045
5046 const armnn::TensorInfo tensorInfo({height, width },
5047 armnn::DataType::Float32);
5048 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5049 -10.0f, -5.0f,
5050 0.0f, 5.0f,
5051 10.0f, 10.0f
5052 }));
5053
5054 LayerTestResult<float, 2> ret(tensorInfo);
5055
5056 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5057
5058 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5059
5060 armnn::FakeQuantizationQueueDescriptor data;
5061 armnn::WorkloadInfo info;
5062
5063 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
5064 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
5065 float min = -10.f;
5066 float max = 10.f;
5067
5068 data.m_Parameters.m_Min = min;
5069 data.m_Parameters.m_Max = max;
5070
5071 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
5072 armnn::FakeQuantizationQueueDescriptor refData = data;
5073 armnn::WorkloadInfo refInfo = info;
5074 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
5075
5076 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
5077
5078 inputHandle->Allocate();
5079 outputHandle->Allocate();
5080
5081 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
5082
5083 workload->Execute();
5084
5085 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
5086
5087 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5088 0.0f, 63.0f,
5089 128.0f, 191.0f,
5090 255.0f, 255.0f
5091 }));
5092 return ret;
5093}
5094
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005095namespace
5096{
5097
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005098LayerTestResult<float, 4> L2NormalizationTestImpl(
5099 armnn::IWorkloadFactory& workloadFactory,
5100 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5101 const armnn::TensorShape& inputOutputTensorShape,
5102 const std::vector<float>& inputValues,
5103 const std::vector<float>& expectedOutputValues,
Matthew Bentham8800c002018-11-19 13:19:28 +00005104 const armnn::DataLayout layout)
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005105{
5106 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5107 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5108
jimfly013aab7c32018-11-12 13:32:08 +00005109 // at this point if we require it permute the input data
5110 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
5111 std::vector<float> inputData = inputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005112 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005113 {
5114 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005115 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005116 inputData = tmp;
5117 }
5118
5119 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005120
5121 LayerTestResult<float, 4> result(outputTensorInfo);
jimfly013aab7c32018-11-12 13:32:08 +00005122 std::vector<float> expectedOutputData = expectedOutputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005123 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005124 {
5125 std::vector<float> tmp(expectedOutputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005126 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
5127 expectedOutputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005128 expectedOutputData = tmp;
5129 }
5130 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005131
5132 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5133 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5134
5135 armnn::L2NormalizationQueueDescriptor descriptor;
Matthew Bentham8800c002018-11-19 13:19:28 +00005136 descriptor.m_Parameters.m_DataLayout = layout;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005137 armnn::WorkloadInfo info;
5138
5139 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5140 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5141
5142 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
5143
5144 inputHandle->Allocate();
5145 outputHandle->Allocate();
5146
5147 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
5148
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005149 ExecuteWorkload(*workload, memoryManager);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005150
5151 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5152
5153 return result;
5154}
5155
5156float CalcInvL2Norm(std::initializer_list<float> elements)
5157{
5158 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
5159 [](float acc, float element) { return acc + element * element; });
5160 return 1.0f / sqrtf(reduction);
5161}
5162
5163} // anonymous namespace
5164
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005165template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005166LayerTestResult<T, 2> Pad2dTestCommon(
5167 armnn::IWorkloadFactory& workloadFactory,
5168 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5169 float qScale,
5170 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005171{
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005172 const armnn::TensorShape inputShape{ 3, 3 };
5173 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005174
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005175 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5176 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005177
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005178 std::vector<T> inputValues(
5179 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005180 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005181 // Height (3) x Width (3)
5182 4, 8, 6,
5183 7, 4, 4,
5184 3, 2, 4
5185 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005186
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005187 std::vector<T> expectedOutputValues(
5188 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005189 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005190 0, 0, 0, 0, 0, 0, 0,
5191 0, 0, 0, 0, 0, 0, 0,
5192 0, 0, 4, 8, 6, 0, 0,
5193 0, 0, 7, 4, 4, 0, 0,
5194 0, 0, 3, 2, 4, 0, 0,
5195 0, 0, 0, 0, 0, 0, 0,
5196 0, 0, 0, 0, 0, 0, 0
5197 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005198
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005199 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005200
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005201 LayerTestResult<T, 2> result(outputTensorInfo);
5202 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005203
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005204 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5205 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005206
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005207 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005208
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005209 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5210 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5211 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005212
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005213 descriptor.m_Parameters.m_PadList = PadList;
5214 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005215
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005216 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5217 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005218
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005219 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005220
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005221 inputHandle->Allocate();
5222 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005223
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005224 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005225
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005226 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005227
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005228 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005229
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005230 return result;
5231}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005232
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005233template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005234LayerTestResult<T, 3> Pad3dTestCommon(
5235 armnn::IWorkloadFactory& workloadFactory,
5236 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5237 float qScale,
5238 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005239{
5240 const armnn::TensorShape inputShape{ 2, 2, 2 };
5241 const armnn::TensorShape outputShape{ 3, 5, 6 };
5242
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005243 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5244 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005245
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005246 std::vector<T> inputValues(
5247 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005248 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005249 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005250 0, 4,
5251 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005252
5253 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005254 6, 1,
5255 5, 2
5256 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005257
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005258 std::vector<T> expectedOutputValues(
5259 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005260 {
5261
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005262 0, 0, 0, 0, 0, 0,
5263 0, 0, 0, 0, 0, 0,
5264 0, 0, 0, 4, 0, 0,
5265 0, 0, 2, 5, 0, 0,
5266 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005267
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005268 0, 0, 0, 0, 0, 0,
5269 0, 0, 0, 0, 0, 0,
5270 0, 0, 6, 1, 0, 0,
5271 0, 0, 5, 2, 0, 0,
5272 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005273
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005274 0, 0, 0, 0, 0, 0,
5275 0, 0, 0, 0, 0, 0,
5276 0, 0, 0, 0, 0, 0,
5277 0, 0, 0, 0, 0, 0,
5278 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005279
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005280 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005281
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005282 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005283
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005284 LayerTestResult<T, 3> result(outputTensorInfo);
5285 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005286
5287 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5288 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5289
5290 armnn::PadQueueDescriptor descriptor;
5291
5292 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5293 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
5294 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5295 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5296
5297 descriptor.m_Parameters.m_PadList = PadList;
5298 armnn::WorkloadInfo info;
5299
5300 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5301 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5302
5303 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5304
5305 inputHandle->Allocate();
5306 outputHandle->Allocate();
5307
5308 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
5309
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005310 workload->Execute();
5311
5312 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
5313
5314 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005315}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005316
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005317template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005318LayerTestResult<T, 4> Pad4dTestCommon(
5319 armnn::IWorkloadFactory& workloadFactory,
5320 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5321 float qScale,
5322 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005323{
5324 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
5325 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
5326
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005327 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5328 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005329
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005330 std::vector<T> inputValues(
5331 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005332 {
5333 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005334 0, 1,
5335 2, 3,
5336 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005337
5338 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005339 6, 7,
5340 8, 9,
5341 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005342
5343 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005344 12, 13,
5345 14, 15,
5346 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005347
5348 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005349 18, 19,
5350 20, 21,
5351 22, 23
5352 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005353
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005354 std::vector<T> expectedOutputValues(
5355 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005356 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005357 0, 0, 0, 0,
5358 0, 0, 0, 0,
5359 0, 0, 0, 0,
5360 0, 0, 0, 0,
5361 0, 0, 0, 0,
5362 0, 0, 0, 0,
5363 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005364
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005365 0, 0, 0, 0,
5366 0, 0, 0, 0,
5367 0, 0, 0, 0,
5368 0, 0, 0, 0,
5369 0, 0, 0, 0,
5370 0, 0, 0, 0,
5371 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005372
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005373 0, 0, 0, 0,
5374 0, 0, 0, 0,
5375 0, 0, 0, 0,
5376 0, 0, 0, 0,
5377 0, 0, 0, 0,
5378 0, 0, 0, 0,
5379 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005380
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005381 0, 0, 0, 0,
5382 0, 0, 0, 0,
5383 0, 0, 0, 0,
5384 0, 0, 0, 0,
5385 0, 0, 0, 0,
5386 0, 0, 0, 0,
5387 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005388
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005389 0, 0, 0, 0,
5390 0, 0, 0, 0,
5391 0, 0, 0, 0,
5392 0, 0, 0, 0,
5393 0, 0, 0, 0,
5394 0, 0, 0, 0,
5395 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005396
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005397 0, 0, 0, 0,
5398 0, 0, 0, 0,
5399 0, 0, 0, 0,
5400 0, 0, 0, 0,
5401 0, 0, 0, 0,
5402 0, 0, 0, 0,
5403 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005404
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005405 0, 0, 0, 0,
5406 0, 0, 0, 0,
5407 0, 0, 0, 0,
5408 0, 0, 0, 0,
5409 0, 0, 0, 0,
5410 0, 0, 0, 0,
5411 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005412
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005413 0, 0, 0, 0,
5414 0, 0, 0, 0,
5415 0, 0, 0, 0,
5416 0, 0, 1, 0,
5417 0, 2, 3, 0,
5418 0, 4, 5, 0,
5419 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005420
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005421 0, 0, 0, 0,
5422 0, 0, 0, 0,
5423 0, 0, 0, 0,
5424 0, 6, 7, 0,
5425 0, 8, 9, 0,
5426 0, 10, 11, 0,
5427 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005428
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005429 0, 0, 0, 0,
5430 0, 0, 0, 0,
5431 0, 0, 0, 0,
5432 0, 0, 0, 0,
5433 0, 0, 0, 0,
5434 0, 0, 0, 0,
5435 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005436
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005437 0, 0, 0, 0,
5438 0, 0, 0, 0,
5439 0, 0, 0, 0,
5440 0, 0, 0, 0,
5441 0, 0, 0, 0,
5442 0, 0, 0, 0,
5443 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005444
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005445 0, 0, 0, 0,
5446 0, 0, 0, 0,
5447 0, 0, 0, 0,
5448 0, 0, 0, 0,
5449 0, 0, 0, 0,
5450 0, 0, 0, 0,
5451 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005452
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005453 0, 0, 0, 0,
5454 0, 0, 0, 0,
5455 0, 0, 0, 0,
5456 0, 12, 13, 0,
5457 0, 14, 15, 0,
5458 0, 16, 17, 0,
5459 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005460
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005461 0, 0, 0, 0,
5462 0, 0, 0, 0,
5463 0, 0, 0, 0,
5464 0, 18, 19, 0,
5465 0, 20, 21, 0,
5466 0, 22, 23, 0,
5467 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005468
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005469 0, 0, 0, 0,
5470 0, 0, 0, 0,
5471 0, 0, 0, 0,
5472 0, 0, 0, 0,
5473 0, 0, 0, 0,
5474 0, 0, 0, 0,
5475 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005476
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005477 0, 0, 0, 0,
5478 0, 0, 0, 0,
5479 0, 0, 0, 0,
5480 0, 0, 0, 0,
5481 0, 0, 0, 0,
5482 0, 0, 0, 0,
5483 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005484
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005485 0, 0, 0, 0,
5486 0, 0, 0, 0,
5487 0, 0, 0, 0,
5488 0, 0, 0, 0,
5489 0, 0, 0, 0,
5490 0, 0, 0, 0,
5491 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005492
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005493 0, 0, 0, 0,
5494 0, 0, 0, 0,
5495 0, 0, 0, 0,
5496 0, 0, 0, 0,
5497 0, 0, 0, 0,
5498 0, 0, 0, 0,
5499 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005500
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005501 0, 0, 0, 0,
5502 0, 0, 0, 0,
5503 0, 0, 0, 0,
5504 0, 0, 0, 0,
5505 0, 0, 0, 0,
5506 0, 0, 0, 0,
5507 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005508
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005509 0, 0, 0, 0,
5510 0, 0, 0, 0,
5511 0, 0, 0, 0,
5512 0, 0, 0, 0,
5513 0, 0, 0, 0,
5514 0, 0, 0, 0,
5515 0, 0, 0, 0
5516 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005517
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005518 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005519
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005520 LayerTestResult<T, 4> result(outputTensorInfo);
5521 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005522
5523 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5524 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5525
5526 armnn::PadQueueDescriptor descriptor;
5527
5528 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5529 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
5530 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5531 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
5532 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
5533
5534 descriptor.m_Parameters.m_PadList = PadList;
5535 armnn::WorkloadInfo info;
5536
5537 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5538 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5539
5540 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5541
5542 inputHandle->Allocate();
5543 outputHandle->Allocate();
5544
5545 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
5546
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005547 workload->Execute();
5548
5549 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5550
5551 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005552}
5553
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005554LayerTestResult<uint8_t, 2> PadUint82dTest(
5555 armnn::IWorkloadFactory& workloadFactory,
5556 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005557{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005558 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005559}
5560
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005561LayerTestResult<uint8_t, 3> PadUint83dTest(
5562 armnn::IWorkloadFactory& workloadFactory,
5563 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005564{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005565 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005566}
5567
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005568LayerTestResult<uint8_t, 4> PadUint84dTest(
5569 armnn::IWorkloadFactory& workloadFactory,
5570 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005571{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005572 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005573}
5574
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005575LayerTestResult<float, 2> PadFloat322dTest(
5576 armnn::IWorkloadFactory& workloadFactory,
5577 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005578{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005579 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005580}
5581
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005582LayerTestResult<float, 3> PadFloat323dTest(
5583 armnn::IWorkloadFactory& workloadFactory,
5584 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005585{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005586 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005587}
5588
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005589LayerTestResult<float, 4> PadFloat324dTest(
5590 armnn::IWorkloadFactory& workloadFactory,
5591 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005592{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005593 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005594}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005595
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005596LayerTestResult<float, 4> L2Normalization1dTest(
5597 armnn::IWorkloadFactory& workloadFactory,
5598 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005599 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005600{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005601 // Width: 1
5602 // Height: 1
5603 // Channels: 10
5604 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005605 unsigned int numberOfBatches = 1;
5606 unsigned int numberOfChannels = 10;
5607 unsigned int height = 1;
5608 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00005609
jimfly013aab7c32018-11-12 13:32:08 +00005610
Nina Drozdd41b2592018-11-19 13:03:36 +00005611 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005612 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005613 std::vector<float> inputValues
5614 {
5615 // Batch 0, Channel 0, Height (1) x Width (1)
5616 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00005617
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005618 // Batch 0, Channel 1, Height (1) x Width (1)
5619 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00005620
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005621 // Batch 0, Channel 2, Height (1) x Width (1)
5622 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00005623
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005624 // Batch 0, Channel 3, Height (1) x Width (1)
5625 4.0f,
5626
5627 // Batch 0, Channel 4, Height (1) x Width (1)
5628 5.0f,
5629
5630 // Batch 0, Channel 5, Height (1) x Width (1)
5631 6.0f,
5632
5633 // Batch 0, Channel 6, Height (1) x Width (1)
5634 7.0f,
5635
5636 // Batch 0, Channel 7, Height (1) x Width (1)
5637 8.0f,
5638
5639 // Batch 0, Channel 8, Height (1) x Width (1)
5640 9.0f,
5641
5642 // Batch 0, Channel 9, Height (1) x Width (1)
5643 10.0f
5644 };
telsoa014fcda012018-03-09 14:13:49 +00005645 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005646 std::vector<float> expectedOutputValues
5647 {
5648 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00005649 1.0f * approxInvL2Norm,
5650 2.0f * approxInvL2Norm,
5651 3.0f * approxInvL2Norm,
5652 4.0f * approxInvL2Norm,
5653 5.0f * approxInvL2Norm,
5654 6.0f * approxInvL2Norm,
5655 7.0f * approxInvL2Norm,
5656 8.0f * approxInvL2Norm,
5657 9.0f * approxInvL2Norm,
5658 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005659 };
telsoa014fcda012018-03-09 14:13:49 +00005660
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005661
5662 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005663 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00005664}
5665
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005666LayerTestResult<float, 4> L2Normalization2dTest(
5667 armnn::IWorkloadFactory& workloadFactory,
5668 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005669 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005670{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005671 // Width: 5
5672 // Height: 1
5673 // Channels: 2
5674 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005675 unsigned int numberOfBatches = 1;
5676 unsigned int numberOfChannels = 2;
5677 unsigned int height = 1;
5678 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00005679
Nina Drozdd41b2592018-11-19 13:03:36 +00005680 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005681 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005682 std::vector<float> inputValues
5683 {
5684 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00005685 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00005686
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005687 // Batch 0, Channel 1, Height (1) x Width (5)
5688 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
5689 };
5690 std::vector<float> expectedOutputValues
5691 {
5692 // Batch 0, Channel 0, Height (1) x Width (5)
5693 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
5694 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
5695 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
5696 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00005697 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
5698
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005699 // Batch 0, Channel 1, Height (1) x Width (5)
5700 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
5701 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
5702 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
5703 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00005704 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005705 };
telsoa014fcda012018-03-09 14:13:49 +00005706
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005707 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005708 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005709}
telsoa014fcda012018-03-09 14:13:49 +00005710
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005711LayerTestResult<float, 4> L2Normalization3dTest(
5712 armnn::IWorkloadFactory& workloadFactory,
5713 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005714 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005715{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005716 // Width: 3
5717 // Height: 4
5718 // Channels: 2
5719 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005720 unsigned int numberOfBatches = 1;
5721 unsigned int numberOfChannels = 2;
5722 unsigned int height = 4;
5723 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00005724
Nina Drozdd41b2592018-11-19 13:03:36 +00005725 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005726 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005727 std::vector<float> inputValues
5728 {
5729 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005730 119.0f, 21.0f, 150.0f,
5731 149.0f, 32.0f, 179.0f,
5732 15.0f, 227.0f, 141.0f,
5733 147.0f, 199.0f, 220.0f,
5734
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005735 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005736 110.0f, 140.0f, 73.0f,
5737 211.0f, 212.0f, 89.0f,
5738 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005739 162.0f, 12.0f, 161.0f
5740 };
5741 std::vector<float> expectedOutputValues
5742 {
5743 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005744 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
5745 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
5746 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
5747 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
5748 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
5749 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
5750 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
5751 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
5752 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
5753 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
5754 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
5755 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
5756
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005757 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005758 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
5759 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
5760 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
5761 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
5762 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
5763 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
5764 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
5765 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
5766 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
5767 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
5768 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005769 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
5770 };
telsoa014fcda012018-03-09 14:13:49 +00005771
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005772 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005773 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005774}
telsoa014fcda012018-03-09 14:13:49 +00005775
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005776LayerTestResult<float, 4> L2Normalization4dTest(
5777 armnn::IWorkloadFactory& workloadFactory,
5778 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005779 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005780{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005781 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005782 // Height: 4
5783 // Channels: 3
5784 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00005785 unsigned int numberOfBatches = 2;
5786 unsigned int numberOfChannels = 3;
5787 unsigned int height = 4;
5788 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00005789
Nina Drozdd41b2592018-11-19 13:03:36 +00005790 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005791 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005792 std::vector<float> inputValues
5793 {
5794 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005795 235.0f, 46.0f, 178.0f,
5796 100.0f, 123.0f, 19.0f,
5797 172.0f, 74.0f, 250.0f,
5798 6.0f, 195.0f, 80.0f,
5799
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005800 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005801 113.0f, 95.0f, 202.0f,
5802 77.0f, 114.0f, 71.0f,
5803 122.0f, 246.0f, 166.0f,
5804 82.0f, 28.0f, 37.0f,
5805
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005806 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005807 56.0f, 170.0f, 162.0f,
5808 194.0f, 89.0f, 254.0f,
5809 12.0f, 209.0f, 200.0f,
5810 1.0f, 64.0f, 54.0f,
5811
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005812 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005813 67.0f, 90.0f, 49.0f,
5814 7.0f, 163.0f, 18.0f,
5815 25.0f, 117.0f, 103.0f,
5816 247.0f, 59.0f, 189.0f,
5817
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005818 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005819 239.0f, 104.0f, 199.0f,
5820 17.0f, 124.0f, 153.0f,
5821 222.0f, 217.0f, 75.0f,
5822 32.0f, 126.0f, 21.0f,
5823
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005824 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005825 97.0f, 145.0f, 215.0f,
5826 115.0f, 116.0f, 238.0f,
5827 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005828 92.0f, 125.0f, 88.0f
5829 };
5830 std::vector<float> expectedOutputValues
5831 {
5832 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005833 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
5834 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
5835 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
5836 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
5837 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
5838 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
5839 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
5840 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
5841 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
5842 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
5843 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
5844 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
5845
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005846 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005847 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
5848 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
5849 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
5850 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
5851 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
5852 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
5853 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
5854 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
5855 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
5856 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
5857 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
5858 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
5859
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005860 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005861 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
5862 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
5863 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
5864 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
5865 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
5866 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
5867 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
5868 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
5869 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
5870 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
5871 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
5872 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
5873
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005874 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005875 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
5876 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
5877 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
5878 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
5879 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
5880 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
5881 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
5882 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
5883 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
5884 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
5885 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
5886 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
5887
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005888 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005889 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
5890 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
5891 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
5892 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
5893 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
5894 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
5895 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
5896 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
5897 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
5898 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
5899 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
5900 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
5901
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005902 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005903 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
5904 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
5905 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
5906 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
5907 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
5908 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
5909 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
5910 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
5911 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
5912 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
5913 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005914 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
5915 };
telsoa014fcda012018-03-09 14:13:49 +00005916
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005917 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005918 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00005919}
5920
// Exercises the Constant layer: the layer owns a fixed 4-D tensor
// (2 batches x 3 channels x 4 rows x 3 columns) and, when executed, must
// reproduce exactly that data on its output.
//
// ArmnnType selects the tensor data type; T is the matching C++ element type.
// qScale/qOffset are only applied when T is a quantized type (see below);
// for float they are ignored by the quantization branch.
// memoryManager is part of the common test signature but is not referenced
// in this implementation.
//
// Returns a LayerTestResult holding both the actual and the expected output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    // A Constant layer has no real "input"; output dimensions mirror the
    // constant data exactly.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // The constant payload, quantized to T with the requested scale/offset.
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f,
    })));

    LayerTestResult<T, 4> result(outputTensorInfo);
    // The expected output is, by definition, the constant data itself.
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The constant data is handed to the workload through the descriptor's
    // m_LayerOutput handle rather than through an input tensor.
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6013
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006014LayerTestResult<float, 4> ConstantTest(
6015 armnn::IWorkloadFactory& workloadFactory,
6016 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006017{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006018 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006019}
6020
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006021LayerTestResult<uint8_t, 4> ConstantTestUint8(
6022 armnn::IWorkloadFactory& workloadFactory,
6023 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006024{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006025 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006026}
6027
// Merges (concatenates) two uint8 tensors whose quantization parameters
// differ, along the channel dimension of a CHW layout. The output shares
// input1's quantization, so input1's bytes pass through unchanged while
// input2's bytes must be requantized into the output's scale/offset —
// see the expected-output table below.
LayerTestResult<uint8_t, 3> MergerUint8DifferentQParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Quantized input1 tensor. Range [-3, 1]
    const float inputScale1 = 0.015686f;
    const int32_t inputOffset1 = 192;

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Quantized input2 tensor. Range [-1, 4]
    const float inputScale2 = 0.019608f;
    const int32_t inputOffset2 = 50;

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // Output has the same quantization parameters as input1,
    // so that only the requantization of input2 is required
    const float outputScale = 0.015686f;
    const int32_t outputOffset = 192;

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        // First two channels: input1 copied through byte-for-byte.
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        // Third channel: input2 re-expressed in the output's scale/offset.
        176, 177, 178,
        179, 181, 182,
        183, 184, 186,
        187, 188, 189,
        191, 192, 193,
        195, 196, 197,
    })
    );

    outputTensorInfo.SetQuantizationScale(outputScale);
    outputTensorInfo.SetQuantizationOffset(outputOffset);
    inputTensorInfo1.SetQuantizationScale(inputScale1);
    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
    inputTensorInfo2.SetQuantizationScale(inputScale2);
    inputTensorInfo2.SetQuantizationOffset(inputOffset2);

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    // input2's view starts after input1's two channels.
    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // When the backend supports sub-tensors, the inputs are views directly
    // into the output buffer; otherwise they get their own handles.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6169
// Merges (concatenates) two uint8 tensors that share identical quantization
// parameters, along the channel dimension of a CHW layout: a 2-channel input
// followed by a 1-channel input produce a 3-channel output whose bytes are a
// straight copy of both inputs.
LayerTestResult<uint8_t, 3> MergerUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's channel.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    // input2's view starts after input1's two channels.
    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // When the backend supports sub-tensors, the inputs are views directly
    // into the output buffer; otherwise they get their own handles.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6304
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006305LayerTestResult<uint8_t, 4> AdditionUint8Test(
6306 armnn::IWorkloadFactory& workloadFactory,
6307 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006308{
6309 unsigned int batchSize = 1;
6310 unsigned int channels = 2;
6311 unsigned int height = 2;
6312 unsigned int width = 3;
6313
6314 const float scale = 7.0f;
6315 const int32_t offset = 3;
6316
6317 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
6318 armnn::TensorInfo outputTensorInfo;
6319
6320 const unsigned int shape[] = { batchSize, channels, height, width };
6321 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
6322 inputTensorInfo1.SetQuantizationScale(scale);
6323 inputTensorInfo1.SetQuantizationOffset(offset);
6324
6325 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
6326 inputTensorInfo2.SetQuantizationScale(scale);
6327 inputTensorInfo2.SetQuantizationOffset(offset);
6328
6329 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
6330 outputTensorInfo.SetQuantizationScale(scale);
6331 outputTensorInfo.SetQuantizationOffset(offset);
6332
telsoa01c577f2c2018-08-31 09:22:23 +01006333 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00006334 auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
6335 {
6336 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
6337 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
6338 }));
6339
telsoa01c577f2c2018-08-31 09:22:23 +01006340 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00006341 auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
6342 {
6343 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
6344 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
6345 }));
6346
telsoa01c577f2c2018-08-31 09:22:23 +01006347 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00006348 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
6349 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
6350 {
6351 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
6352 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
6353 }));
6354
6355 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
6356 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
6357 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6358
6359 armnn::AdditionQueueDescriptor data;
6360 armnn::WorkloadInfo info;
6361 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
6362 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
6363 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
6364
6365 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
6366
6367 inputHandle1->Allocate();
6368 inputHandle2->Allocate();
6369 outputHandle->Allocate();
6370
6371 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
6372 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
6373
6374 workload->Execute();
6375
6376 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6377
6378 return result;
6379}
6380
namespace
{
// Shared driver for uint8 multiplication tests.
//
// Builds QuantisedAsymm8 tensor infos from the given 4-D shapes and
// per-tensor quantization (scale/offset), runs a Multiplication workload
// on values0 * values1, and returns a result pairing the actual output
// with outValues as the expected output.
//
// shape0/values0/scale0/offset0 - first input tensor.
// shape1/values1/scale1/offset1 - second input tensor (may differ in shape,
//                                 e.g. for the broadcast tests below).
// outShape/outValues/outScale/outOffset - expected output tensor.
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<uint8_t> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<uint8_t> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<uint8_t> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
6444
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006445LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
6446 armnn::IWorkloadFactory& workloadFactory,
6447 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006448{
6449 unsigned int batchSize = 1;
6450 unsigned int channels = 2;
6451 unsigned int height = 2;
6452 unsigned int width = 3;
6453 const unsigned int shape[] = { batchSize, channels, height, width };
6454
telsoa01c577f2c2018-08-31 09:22:23 +01006455 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01006456 std::vector<uint8_t> input0({
6457 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
6458 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
6459 });
6460
telsoa01c577f2c2018-08-31 09:22:23 +01006461 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01006462 std::vector<uint8_t> input1({
6463 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
6464 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
6465 });
6466
telsoa01c577f2c2018-08-31 09:22:23 +01006467 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01006468 std::vector<uint8_t> output(
6469 {
6470 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
6471 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
6472 });
6473
6474 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006475 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01006476 shape,
6477 input0,
6478 4.0f,
6479 1,
6480 shape,
6481 input1,
6482 3.0f,
6483 -2,
6484 shape,
6485 output,
telsoa01c577f2c2018-08-31 09:22:23 +01006486 1366.255f, // Scale/offset chosen to have output values out of range.
surmeh01bceff2f2018-03-29 16:29:27 +01006487 -5);
6488}
6489
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006490LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
6491 armnn::IWorkloadFactory& workloadFactory,
6492 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006493{
6494 const unsigned int shape0[] = { 1, 2, 2, 3 };
6495 const unsigned int shape1[] = { 1, 1, 1, 1 };
6496
6497 std::vector<uint8_t> input0({
6498 1, 2, 3, 4, 5, 6,
6499 7, 8, 9, 10, 11, 12
6500 });
6501
6502 std::vector<uint8_t> input1({2});
6503
6504 std::vector<uint8_t> output({
6505 2, 4, 6, 8, 10, 12,
6506 14, 16, 18, 20, 22, 24
6507 });
6508
6509 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006510 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01006511 shape0,
6512 input0,
6513 1.0f,
6514 0,
6515 shape1,
6516 input1,
6517 1.0f,
6518 0,
6519 shape0,
6520 output,
6521 1.0f,
6522 0);
6523}
6524
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006525LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
6526 armnn::IWorkloadFactory& workloadFactory,
6527 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006528{
6529 const unsigned int shape0[] = { 1, 2, 2, 3 };
6530 const unsigned int shape1[] = { 1, 1, 1, 3 };
6531
6532 std::vector<uint8_t> input0({
6533 1, 2, 3, 4, 5, 6,
6534 7, 8, 9, 10, 11, 12
6535 });
6536
6537 std::vector<uint8_t> input1({1, 2, 3});
6538
6539 std::vector<uint8_t> output({
6540 1, 4, 9, 4, 10, 18,
6541 7, 16, 27, 10, 22, 36
6542 });
6543
6544 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006545 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01006546 shape0,
6547 input0,
6548 1.0f,
6549 0,
6550 shape1,
6551 input1,
6552 1.0f,
6553 0,
6554 shape0,
6555 output,
6556 1.0f,
6557 0);
6558}
telsoa014fcda012018-03-09 14:13:49 +00006559
namespace
{
// Shared driver for subtraction tests, templated on the element type:
// uint8_t selects QuantisedAsymm8 tensors, any other T selects Float32.
//
// Builds tensor infos from the given 4-D shapes and per-tensor quantization
// (scale/offset), runs a Subtraction workload on values0 - values1, and
// returns a result pairing the actual output with outValues as the expected
// output. shape1 may differ from shape0 for the broadcast tests below.
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Element type determines the armnn data type for all three tensors.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
6628
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006629LayerTestResult<uint8_t, 4> SubtractionUint8Test(
6630 armnn::IWorkloadFactory& workloadFactory,
6631 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006632{
6633 const unsigned int shape0[] = { 1, 1, 2, 2 };
6634 const unsigned int shape1[] = { 1, 1, 2, 2 };
6635
6636 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
6637 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
6638 std::vector<uint8_t> output({ 3, 3, 5, 5 });
6639
6640 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006641 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006642 shape0, input0, 0.5f, 2,
6643 shape1, input1, 1.0f, 0,
6644 shape0, output, 1.0f, 0);
6645}
6646
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006647LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
6648 armnn::IWorkloadFactory& workloadFactory,
6649 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006650{
6651 const unsigned int shape0[] = { 1, 1, 2, 2 };
6652 const unsigned int shape1[] = { 1, 1, 1, 1 };
6653
6654 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
6655 std::vector<uint8_t> input1({ 2 });
6656 std::vector<uint8_t> output({ 5, 6, 7, 8 });
6657
6658 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006659 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006660 shape0, input0, 0.5f, 2,
6661 shape1, input1, 1.0f, 0,
6662 shape0, output, 1.0f, 3);
6663}
6664
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006665LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
6666 armnn::IWorkloadFactory& workloadFactory,
6667 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006668{
6669 const unsigned int shape0[] = { 1, 1, 2, 2 };
6670 const unsigned int shape1[] = { 1, 1, 2, 1 };
6671
6672 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
6673 std::vector<uint8_t> input1({ 2, 1 });
6674 std::vector<uint8_t> output({ 8, 11, 12, 15 });
6675
6676 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006677 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006678 shape0, input0, 1.0f, 0,
6679 shape1, input1, 1.0f, 0,
6680 shape0, output, 1.0f, 0);
6681}
6682
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006683LayerTestResult<float, 4> SubtractionTest(
6684 armnn::IWorkloadFactory& workloadFactory,
6685 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006686{
6687 const unsigned int shape0[] = { 1, 1, 2, 2 };
6688 const unsigned int shape1[] = { 1, 1, 2, 2 };
6689
6690 std::vector<float> input0({ 1, 2, 3, 4 });
6691 std::vector<float> input1({ 1, -1, 0, 2 });
6692 std::vector<float> output({ 0, 3, 3, 2 });
6693
6694 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006695 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006696 shape0, input0, 1.0f, 0,
6697 shape1, input1, 1.0f, 0,
6698 shape0, output, 1.0f, 0);
6699}
6700
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006701LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
6702 armnn::IWorkloadFactory& workloadFactory,
6703 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006704{
6705 const unsigned int shape0[] = { 1, 1, 2, 2 };
6706 const unsigned int shape1[] = { 1, 1, 1, 1 };
6707
6708 std::vector<float> input0({ 1, 2, 3, 4 });
6709 std::vector<float> input1({ 10 });
6710 std::vector<float> output({ -9, -8, -7, -6 });
6711
6712 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006713 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006714 shape0, input0, 1.0f, 0,
6715 shape1, input1, 1.0f, 0,
6716 shape0, output, 1.0f, 0);
6717}
6718
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006719LayerTestResult<float, 4> SubtractionBroadcastTest(
6720 armnn::IWorkloadFactory& workloadFactory,
6721 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006722{
6723 const unsigned int shape0[] = { 1, 1, 2, 2 };
6724 const unsigned int shape1[] = { 1, 1, 1, 2 };
6725
6726 std::vector<float> input0({ 1, 2, 3, 4 });
6727 std::vector<float> input1({ 10, -5 });
6728 std::vector<float> output({ -9, 7, -7, 9 });
6729
6730 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006731 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006732 shape0, input0, 1.0f, 0,
6733 shape1, input1, 1.0f, 0,
6734 shape0, output, 1.0f, 0);
6735}
6736
// ResizeBilinear where the output dimensions equal the input dimensions:
// the operation must behave as a no-op and the output must be a byte-for-byte
// copy of the input. Uint8 (QuantisedAsymm8) variant; memoryManager is
// accepted for signature uniformity with the other tests but is unused here.
LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Output dims deliberately identical to the input dims — no resampling.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Tensors are NCHW; input and output share the same quantisation
    // parameters so the raw uint8 data can be compared directly.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    // No-op resize: expected output is exactly the input.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Handles must be allocated before data is copied into them.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6790
// ResizeBilinear downscaling a 2x2 input to 1x1, uint8 (QuantisedAsymm8)
// variant. Verifies the corner-projection convention described below.
// memoryManager is accepted for signature uniformity but unused here.
LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Halve both spatial dimensions: 2x2 -> 1x1.
    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // NCHW layout; identical quantisation on input and output so raw uint8
    // values can be compared directly.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Handles must be allocated before data is copied into them.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6849
// ResizeBilinear downscaling a square 4x4 input to 2x2 (uniform 2x
// minification in both dimensions), uint8 (QuantisedAsymm8) variant.
// memoryManager is accepted for signature uniformity but unused here.
LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Halve both spatial dimensions: 4x4 -> 2x2.
    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // NCHW layout; identical quantisation on input and output so raw uint8
    // values can be compared directly.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    // With top-left corner projection each output texel samples the input at
    // even coordinates, so the expected values are the (0,0), (0,2), (2,0),
    // (2,2) input elements.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Handles must be allocated before data is copied into them.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6906
// ResizeBilinear non-uniform minification: 3x2 input down to 2x1, uint8
// (QuantisedAsymm8) variant. Inline comments on the data give the
// dequantised (real) values implied by scale 1.5 / offset -1.
// memoryManager is accepted for signature uniformity but unused here.
LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Different shrink factors per axis: width 3 -> 2, height 2 -> 1.
    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // NCHW layout; identical quantisation on input and output.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Handles must be allocated before data is copied into them.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6961
// ResizeBilinear magnification: 2x3 input up to 5x3 (width enlarged, height
// preserved), uint8 (QuantisedAsymm8) variant. Note that unlike the other
// resize tests, input and output deliberately use DIFFERENT quantisation
// parameters, so the workload must requantise; the inline comments give the
// dequantised (real) values for each element.
// memoryManager is accepted for signature uniformity but unused here.
LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Width magnified 2 -> 5; height unchanged.
    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228, // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71 // 2.400595, 0.68896
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Handles must be allocated before data is copied into them.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7018
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00007019LayerTestResult<float, 2> Rsqrt2dTestCommon(
7020 armnn::IWorkloadFactory& workloadFactory,
7021 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7022 const armnn::TensorInfo inputTensorInfo,
7023 const armnn::TensorInfo outputTensorInfo,
7024 std::vector<float> inputValues,
7025 std::vector<float> expectedOutputValues)
7026{
7027 auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, std::vector<float>(inputValues));
7028
7029 LayerTestResult<float, 2> result(outputTensorInfo);
7030 result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(expectedOutputValues));
7031
7032 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7033 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7034
7035 armnn::RsqrtQueueDescriptor descriptor;
7036
7037 armnn::WorkloadInfo info;
7038
7039 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7040 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7041
7042 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);
7043
7044 inputHandle->Allocate();
7045 outputHandle->Allocate();
7046
7047 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
7048
7049 workload->Execute();
7050
7051 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
7052
7053 return result;
7054}
7055LayerTestResult<float, 2> Rsqrt2dTest(
7056 armnn::IWorkloadFactory& workloadFactory,
7057 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7058{
7059 const armnn::TensorShape inputShape{ 2, 2 };
7060 const armnn::TensorShape outputShape{ 2, 2 };
7061
7062 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7063 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7064
7065 std::vector<float> inputValues
7066 {
7067 1.f, 4.f,
7068 16.f, 25.f
7069 };
7070
7071 std::vector<float> expectedOutputValues
7072 {
7073 1.f, 0.5f,
7074 0.25f, 0.2f
7075 };
7076
7077 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7078 inputTensorInfo, outputTensorInfo,
7079 inputValues, expectedOutputValues);
7080}
7081
// Rsqrt (1/sqrt(x)) on a 3D tensor of perfect squares. The 2D common helper
// cannot be reused here (it is fixed to rank 2), so the workload boilerplate
// is repeated inline. memoryManager is accepted for signature uniformity but
// unused here.
LayerTestResult<float, 3> Rsqrt3dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const armnn::TensorShape inputShape{ 3, 1, 2 };
    const armnn::TensorShape outputShape{ 3, 1, 2 };

    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues
    {
        1.f, 4.f, 16.f,
        25.f, 64.f, 100.f
    };

    // expectedOutputValues[i] == 1 / sqrt(inputValues[i]) — all exact floats.
    std::vector<float> expectedOutputValues
    {
        1.f, 0.5f, 0.25f,
        0.2f, 0.125f, 0.1f
    };

    auto inputTensor = MakeTensor<float, 3>(inputTensorInfo, std::vector<float>(inputValues));

    LayerTestResult<float, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float >(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::RsqrtQueueDescriptor descriptor;

    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);

    // Handles must be allocated before data is copied into them.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
7132
7133LayerTestResult<float, 2> RsqrtZeroTest(
7134 armnn::IWorkloadFactory& workloadFactory,
7135 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7136{
7137 const armnn::TensorShape inputShape{ 1, 2 };
7138 const armnn::TensorShape outputShape{ 1, 2 };
7139
7140 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7141 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7142
7143 std::vector<float> inputValues
7144 {
7145 0.f, -0.f
7146 };
7147
7148 std::vector<float> expectedOutputValues
7149 {
7150 INFINITY, -INFINITY
7151 };
7152
7153 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7154 inputTensorInfo, outputTensorInfo,
7155 inputValues, expectedOutputValues);
7156}
7157
7158LayerTestResult<float, 2> RsqrtNegativeTest(
7159 armnn::IWorkloadFactory& workloadFactory,
7160 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7161{
7162 const armnn::TensorShape inputShape{ 1, 2 };
7163 const armnn::TensorShape outputShape{ 1, 2 };
7164
7165 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7166 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7167
7168 std::vector<float> inputValues
7169 {
7170 -25.f, -16.f
7171 };
7172
7173 std::vector<float> expectedOutputValues
7174 {
7175 -NAN, -NAN
7176 };
7177
7178 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7179 inputTensorInfo, outputTensorInfo,
7180 inputValues, expectedOutputValues);
7181}
7182
// Batch normalisation over a 1x2x3x2 NCHW float tensor. The expected values
// depend on the mean/variance/beta/gamma configured inside BatchNormTestImpl
// — presumably identity on channel 0 and a +2 shift on negatives for
// channel 1; confirm against BatchNormTestImpl if the vectors change.
LayerTestResult<float, 4> BatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    // Float32 run: quantisation scale 0 / offset 0 act as "no quantisation".
    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        0.f, 0, armnn::DataLayout::NCHW);
}
7223
// Batch normalisation over a 1x3x2x2 NHWC float tensor — the same data as
// BatchNormTest but stored channels-last, exercising the NHWC code path.
LayerTestResult<float, 4> BatchNormNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    // Float32 run: quantisation scale 0 / offset 0 act as "no quantisation".
    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        0.f, 0, armnn::DataLayout::NHWC);
}
7268
// Quantised (uint8) batch normalisation over a 1x2x3x2 NCHW tensor — same
// float data and expectations as BatchNormTest; BatchNormTestImpl quantises
// with scale 1/20 and offset 50 before running.
LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    // Quantisation parameters: scale 1/20, zero point 50.
    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        1.f/20.f, 50, armnn::DataLayout::NCHW);
}
7309
// Quantised (uint8) batch normalisation over a 1x3x2x2 NHWC tensor — the
// NHWC counterpart of BatchNormUint8Test, with the same data channels-last.
LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    // Quantisation parameters: scale 1/20, zero point 50.
    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
        (workloadFactory, memoryManager,
         inputOutputShape, inputValues, expectedOutputValues,
         1.f/20.f, 50, armnn::DataLayout::NHWC);
}
7354
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007355LayerTestResult<uint8_t, 4> ConstantUint8Test(
7356 armnn::IWorkloadFactory& workloadFactory,
7357 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007358{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007359 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00007360}
7361
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007362LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
7363 armnn::IWorkloadFactory& workloadFactory,
7364 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007365{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007366 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007367}
7368
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007369LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
7370 armnn::IWorkloadFactory& workloadFactory,
7371 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007372{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007373 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007374}
7375
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007376LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
7377 armnn::IWorkloadFactory& workloadFactory,
7378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007379{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007380 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007381}
7382
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007383LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
7384 armnn::IWorkloadFactory& workloadFactory,
7385 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007386{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007387 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
7388 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007389}
7390
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007391LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
7392 armnn::IWorkloadFactory& workloadFactory,
7393 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007394{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007395 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
7396 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007397}
7398
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007399LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
7400 armnn::IWorkloadFactory& workloadFactory,
7401 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007402{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007403 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007404}
7405
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007406LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
7407 armnn::IWorkloadFactory& workloadFactory,
7408 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007410 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007411}
7412
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007413LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
7414 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00007415 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7416 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00007417{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007418 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
7419 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007420}
7421
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007422LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
7423 armnn::IWorkloadFactory& workloadFactory,
7424 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007425{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007426 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007427}
7428
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007429LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
7430 armnn::IWorkloadFactory& workloadFactory,
7431 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007432{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007433 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
7434 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007435}
7436
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007437LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
7438 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00007439 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7440 bool useSubtensor)
7441{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007442 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
7443 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007444}
7445
7446LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
7447 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007448 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007449{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007450 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007451}
7452
7453LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
7454 armnn::IWorkloadFactory& workloadFactory,
7455 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7456{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007457 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007458}
7459
7460LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
7461 armnn::IWorkloadFactory& workloadFactory,
7462 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7463{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007464 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007465}
7466
7467LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
7468 armnn::IWorkloadFactory& workloadFactory,
7469 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
7470{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007471 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
7472 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00007473}
7474
7475LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
7476 armnn::IWorkloadFactory& workloadFactory,
7477 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7478{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007479 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
7480 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007481}
7482
7483LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
7484 armnn::IWorkloadFactory& workloadFactory,
7485 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7486{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007487 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
7488 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007489}
7490
7491LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
7492 armnn::IWorkloadFactory& workloadFactory,
7493 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7494{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007495 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
7496 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007497}
7498
7499LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
7500 armnn::IWorkloadFactory& workloadFactory,
7501 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7502 bool useSubtensor)
7503{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007504 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
7505 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00007506}
7507
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007508LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
7509 armnn::IWorkloadFactory& workloadFactory,
7510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7511 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00007512{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007513 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
7514 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00007515}
7516
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007517LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
7518 armnn::IWorkloadFactory& workloadFactory,
7519 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7520 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00007521{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007522 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007523 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00007524}
7525
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007526LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
7527 armnn::IWorkloadFactory& workloadFactory,
7528 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7529 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00007530{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007531 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
7532 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00007533}
7534
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007535LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
7536 armnn::IWorkloadFactory& workloadFactory,
7537 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7538 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00007539{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007540 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007541 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00007542}
7543
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007544LayerTestResult<float, 4> SimpleMaxPooling2dTest(
7545 armnn::IWorkloadFactory& workloadFactory,
7546 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007547 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00007548{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007549 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00007550}
7551
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007552LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
7553 armnn::IWorkloadFactory& workloadFactory,
7554 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007555 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01007556{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007557 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01007558}
7559
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007560LayerTestResult<float, 4> SimpleAveragePooling2dTest(
7561 armnn::IWorkloadFactory& workloadFactory,
7562 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007563 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00007564{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007565 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01007566}
7567
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007568LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
7569 armnn::IWorkloadFactory& workloadFactory,
7570 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007571 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01007572{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007573 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007574 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00007575}
7576
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007577LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
7578 armnn::IWorkloadFactory& workloadFactory,
7579 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7580 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01007581{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007582 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007583 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01007584}
7585
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007586LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
7587 armnn::IWorkloadFactory& workloadFactory,
7588 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007589{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007590 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007591}
7592
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007593LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
7594 armnn::IWorkloadFactory& workloadFactory,
7595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007596{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007597 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
7598 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00007599}
7600
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007601LayerTestResult<float, 4> SimpleL2Pooling2dTest(
7602 armnn::IWorkloadFactory& workloadFactory,
7603 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007604 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00007605{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007606 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00007607}
7608
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007609LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
7610 armnn::IWorkloadFactory& workloadFactory,
7611 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007612 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00007613{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007614 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00007615}
7616
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007617LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
7618 armnn::IWorkloadFactory& workloadFactory,
7619 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007620{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007621 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007622}
7623
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007624LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
7625 armnn::IWorkloadFactory& workloadFactory,
7626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007627{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007628 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007629}
7630
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007631LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
7632 armnn::IWorkloadFactory& workloadFactory,
7633 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007634{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007635 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007636}
7637
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007638LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
7639 armnn::IWorkloadFactory& workloadFactory,
7640 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007641{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007642 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007643}
7644
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007645LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
7646 armnn::IWorkloadFactory& workloadFactory,
7647 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007648{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007649 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007650}
7651
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007652LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
7653 armnn::IWorkloadFactory& workloadFactory,
7654 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007655{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007656 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007657}
7658
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007659LayerTestResult<float, 4> L2Pooling2dSize7Test(
7660 armnn::IWorkloadFactory& workloadFactory,
7661 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007662{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007663 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007664}
7665
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007666LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
7667 armnn::IWorkloadFactory& workloadFactory,
7668 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007669{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007670 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007671}
7672
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007673LayerTestResult<float, 4> L2Pooling2dSize9Test(
7674 armnn::IWorkloadFactory& workloadFactory,
7675 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007676{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007677 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007678}
7679
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007680LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
7681 armnn::IWorkloadFactory& workloadFactory,
7682 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007683{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007684 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007685}
7686
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007687LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
7688 armnn::IWorkloadFactory& workloadFactory,
7689 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007690{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007691 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007692}
7693
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007694LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
7695 armnn::IWorkloadFactory& workloadFactory,
7696 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007697{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007698 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007699}
7700
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007701LayerTestResult<float, 4> ComparePooling2dTest(
7702 armnn::IWorkloadFactory& workloadFactory,
7703 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7704 armnn::IWorkloadFactory& refWorkloadFactory,
7705 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00007706{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007707 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007708 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00007709}
7710
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007711LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
7712 armnn::IWorkloadFactory& workloadFactory,
7713 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7714 armnn::IWorkloadFactory& refWorkloadFactory,
7715 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00007716{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007717 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007718 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00007719}
7720
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007721LayerTestResult<float, 2> FullyConnectedLargeTest(
7722 armnn::IWorkloadFactory& workloadFactory,
7723 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7724 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00007725{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007726 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00007727}
7728
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007729LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
7730 armnn::IWorkloadFactory& workloadFactory,
7731 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007732{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007733 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007734}
7735
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007736LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
7737 armnn::IWorkloadFactory& workloadFactory,
7738 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007739{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007740 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
7741 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00007742}
7743
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007744LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
7745 armnn::IWorkloadFactory& workloadFactory,
7746 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007747{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007748 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007749}
7750
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007751LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
7752 armnn::IWorkloadFactory& workloadFactory,
7753 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007754{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007755 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
7756 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00007757}
7758
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007759LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
7760 armnn::IWorkloadFactory& workloadFactory,
7761 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007762{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007763 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007764}
7765
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007766LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
7767 armnn::IWorkloadFactory& workloadFactory,
7768 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007769{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007770 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
7771 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007772}
7773
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007774LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
7775 armnn::IWorkloadFactory& workloadFactory,
7776 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007777{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007778 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
7779 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007780}
7781
7782LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007783 armnn::IWorkloadFactory& workloadFactory,
7784 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007785{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007786 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
7787 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007788}
7789
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007790LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
7791 armnn::IWorkloadFactory& workloadFactory,
7792 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007793{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007794 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007795}
7796
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007797LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
7798 armnn::IWorkloadFactory& workloadFactory,
7799 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007800{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007801 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
7802 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007803}
7804
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007805LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
7806 armnn::IWorkloadFactory& workloadFactory,
7807 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007808{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007809 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007810}
7811
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007812LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
7813 armnn::IWorkloadFactory& workloadFactory,
7814 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007815{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007816 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007817}
7818
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007819LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
7820 armnn::IWorkloadFactory& workloadFactory,
7821 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007822{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007823 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007824}
7825
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007826LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
7827 armnn::IWorkloadFactory& workloadFactory,
7828 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007829{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007830 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007831}
7832
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007833LayerTestResult<float, 4> SimplePermuteFloat32Test(
7834 armnn::IWorkloadFactory& workloadFactory,
7835 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007836{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007837 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007838};
7839
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007840LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
7841 armnn::IWorkloadFactory& workloadFactory,
7842 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007843{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007844 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007845};
surmeh01bceff2f2018-03-29 16:29:27 +01007846
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007847LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
7848 armnn::IWorkloadFactory& workloadFactory,
7849 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007850{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007851 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01007852};
7853
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007854LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
7855 armnn::IWorkloadFactory& workloadFactory,
7856 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007857{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007858 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01007859};
7860
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007861LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
7862 armnn::IWorkloadFactory& workloadFactory,
7863 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007864{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007865 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01007866};
7867
namespace
{

// Builds a Mean workload for the given input/axis/keepDims configuration, runs it,
// and returns the actual output alongside the caller-supplied expected output.
//
// T          - element type (uint8_t selects QuantisedAsymm8, anything else Float32 —
//              see the dataType selection below).
// InputDim   - rank of the input tensor; OutputDim - rank of the output tensor.
// inputShape/outputShape - arrays of InputDim/OutputDim dimension sizes.
// axis       - dimensions to reduce over; empty means reduce over all dimensions
//              (as interpreted by the workload, not by this helper).
// keepDims   - whether reduced dimensions are kept with size 1.
// scale/offset - quantisation parameters applied to BOTH input and output infos.
//
// NOTE(review): memoryManager is accepted but not used in this helper — presumably
// kept for signature consistency with the other test helpers; confirm before removing.
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int* inputShape,
    const std::vector<T>& inputData,
    const std::vector<unsigned int>& axis,
    bool keepDims,
    const unsigned int* outputShape,
    const std::vector<T>& outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Data type is derived from T: uint8_t -> quantised asymm8, otherwise float32.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    // The same quantisation parameters are applied to input and output
    // (no-op for the float path).
    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    // Pre-populate the expected output; result.output is filled after execution.
    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the Mean operation and bind the tensor handles.
    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    // Allocate device/backing memory before any data transfer.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    // Read back the computed result for comparison against result.outputExpected.
    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace
7925
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007926LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
7927 armnn::IWorkloadFactory& workloadFactory,
7928 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007929{
7930 const unsigned int inputShape[] = { 3, 2 };
7931 const unsigned int outputShape[] = { 1 };
7932
7933 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
7934 std::vector<uint8_t> output({ 2 });
7935
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007936 return MeanTestHelper<uint8_t, 2, 1>(
7937 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007938}
7939
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007940LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(
7941 armnn::IWorkloadFactory& workloadFactory,
7942 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007943{
7944 const unsigned int inputShape[] = { 1, 1, 3, 2 };
7945 const unsigned int outputShape[] = { 1, 1, 2 };
7946
7947 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
7948 std::vector<uint8_t> output({ 2, 2 });
7949
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007950 return MeanTestHelper<uint8_t, 4, 3>(
7951 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007952}
7953
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007954LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(
7955 armnn::IWorkloadFactory& workloadFactory,
7956 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007957{
7958 const unsigned int inputShape[] = { 1, 1, 3, 2 };
7959 const unsigned int outputShape[] = { 1, 1, 1, 2 };
7960
7961 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
7962 std::vector<uint8_t> output({ 2, 2 });
7963
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007964 return MeanTestHelper<uint8_t, 4, 4>(
7965 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007966}
7967
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007968LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(
7969 armnn::IWorkloadFactory& workloadFactory,
7970 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007971{
7972 const unsigned int inputShape[] = { 2, 3, 1, 2 };
7973 const unsigned int outputShape[] = { 1, 3, 1, 1 };
7974
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007975 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
narpra011e4c31d2018-09-28 11:07:51 +01007976 std::vector<uint8_t> output({ 1, 3, 5 });
7977
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007978 return MeanTestHelper<uint8_t, 4, 4>(
7979 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007980}
7981
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007982LayerTestResult<uint8_t, 1> MeanVtsUint8Test(
7983 armnn::IWorkloadFactory& workloadFactory,
7984 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007985{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007986 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01007987 const unsigned int outputShape[] = { 2 };
7988
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007989 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
7990 24 });
7991 std::vector<uint8_t> output({ 12, 13 });
narpra011e4c31d2018-09-28 11:07:51 +01007992
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007993 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, memoryManager,
7994 inputShape, input, { 0, 1 }, false, outputShape,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007995 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01007996}
7997
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007998LayerTestResult<float, 1> MeanFloatSimpleTest(
7999 armnn::IWorkloadFactory& workloadFactory,
8000 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008001{
8002 const unsigned int inputShape[] = { 3, 2 };
8003 const unsigned int outputShape[] = { 1 };
8004
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008005 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8006 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008007
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008008 return MeanTestHelper<float, 2, 1>(
8009 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008010}
8011
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008012LayerTestResult<float, 3> MeanFloatSimpleAxisTest(
8013 armnn::IWorkloadFactory& workloadFactory,
8014 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008015{
8016 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8017 const unsigned int outputShape[] = { 3, 1, 2 };
8018
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008019 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
8020 std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008021
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008022 return MeanTestHelper<float, 4, 3>(
8023 workloadFactory, memoryManager, inputShape, input, { 0 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008024}
8025
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008026LayerTestResult<float, 4> MeanFloatKeepDimsTest(
8027 armnn::IWorkloadFactory& workloadFactory,
8028 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008029{
8030 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8031 const unsigned int outputShape[] = { 1, 1, 1, 2 };
8032
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008033 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8034 std::vector<float> output({ 2.0f, 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008035
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008036 return MeanTestHelper<float, 4, 4>(
8037 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008038}
8039
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008040LayerTestResult<float, 4> MeanFloatMultipleDimsTest(
8041 armnn::IWorkloadFactory& workloadFactory,
8042 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008043{
8044 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8045 const unsigned int outputShape[] = { 1, 3, 1, 1 };
8046
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008047 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
8048 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008049
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008050 return MeanTestHelper<float, 4, 4>(
8051 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008052}
8053
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008054LayerTestResult<float, 1> MeanVtsFloat1Test(
8055 armnn::IWorkloadFactory& workloadFactory,
8056 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008057{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008058 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01008059 const unsigned int outputShape[] = { 2 };
8060
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008061 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8062 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8063 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008064
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008065 return MeanTestHelper<float, 3, 1>(
8066 workloadFactory, memoryManager, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008067}
8068
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008069LayerTestResult<float, 3> MeanVtsFloat2Test(
8070 armnn::IWorkloadFactory& workloadFactory,
8071 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008072{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008073 const unsigned int inputShape[] = { 4, 3, 2 };
8074 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01008075
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008076 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8077 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8078 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008079
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008080 return MeanTestHelper<float, 3, 3>(
8081 workloadFactory, memoryManager, inputShape, input, { 0, 2 }, true, outputShape, output);
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008082}
8083
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008084LayerTestResult<float, 3> MeanVtsFloat3Test(
8085 armnn::IWorkloadFactory& workloadFactory,
8086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008087{
8088 const unsigned int inputShape[] = { 1, 2, 2, 1 };
8089 const unsigned int outputShape[] = { 1, 2, 1 };
8090
8091 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
8092 std::vector<float> output({ 1.5f, 3.5f });
8093
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008094 return MeanTestHelper<float, 4, 3>(
8095 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008096}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01008097
// Chains two workloads (MaxPool2d -> Addition) on the same backend, feeding the
// pooling output handle directly into the addition, and checks the final result.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                                    {1, 2, 3,
                                                                     4, 5, 6,
                                                                     7, 8, 9
                                                                    });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
            workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
            workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Scratch buffer shaped like the pooling output (only used for the copy round-trip below).
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                                {12, 16,
                                                                 24, 28,
                                                                });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    // Allocate all handles before any data transfer.
    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): poolingOutputHandle is read back BEFORE the pooling workload has run,
    // then the same (uninitialized) data is written straight back. The round-trip looks
    // redundant since workload->Execute() below overwrites the handle — confirm whether
    // it is needed by any backend before removing.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Order matters: the pooling workload must run first, because the addition
    // consumes poolingOutputHandle as its first input.
    workload->Execute();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008200
LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Instantiates the simple SpaceToBatchNd test template for Float32.
    return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8207
LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Instantiates the multi-channel SpaceToBatchNd test template for Float32.
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8214
LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Instantiates the multi-block SpaceToBatchNd test template for Float32.
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8221
LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Instantiates the padded SpaceToBatchNd test template for Float32.
    return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8228
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Instantiates the simple SpaceToBatchNd test template for QAsymm8.
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8235
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Instantiates the multi-channel SpaceToBatchNd test template for QAsymm8.
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8242
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Instantiates the multi-block SpaceToBatchNd test template for QAsymm8.
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8249
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Instantiates the padded SpaceToBatchNd test template for QAsymm8.
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8256
LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // NHWC-layout variant of the simple SpaceToBatchNd test for Float32.
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8263
LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // NHWC-layout variant of the multi-channel SpaceToBatchNd test for Float32.
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8270
LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // NHWC-layout variant of the multi-block SpaceToBatchNd test for Float32.
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8277
LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // NHWC-layout variant of the padded SpaceToBatchNd test for Float32.
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8284
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // NHWC-layout variant of the simple SpaceToBatchNd test for QAsymm8.
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8291
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // NHWC-layout variant of the multi-channel SpaceToBatchNd test for QAsymm8.
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8298
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // NHWC-layout variant of the multi-block SpaceToBatchNd test for QAsymm8.
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8305
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // NHWC-layout variant of the padded SpaceToBatchNd test for QAsymm8.
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008312
namespace {

// Shared driver for the BatchToSpaceNd tests below: builds input/output tensor infos
// (Float32, or QAsymm8 when T is uint8_t), runs a single BatchToSpaceNd workload on the
// given backend, and returns the actual vs. expected result.
//
// inputShape/outputShape must point to InputDim/OutputDim extents respectively;
// scale/offset apply to both tensors (defaults describe unquantized data).
template<typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout& dataLayout,
    const unsigned int *inputShape,
    const std::vector<T> &inputData,
    const std::vector<unsigned int> &blockShape,
    const std::vector<std::pair<unsigned int, unsigned int>> &crops,
    const unsigned int *outputShape,
    const std::vector<T> &outputData,
    float scale = 1.0f,
    int32_t offset = 0)
 {
    // Data type is inferred from T: uint8_t means quantized asymmetric 8-bit.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchToSpaceNdQueueDescriptor data;
    data.m_Parameters.m_DataLayout = dataLayout;
    data.m_Parameters.m_BlockShape = blockShape;
    data.m_Parameters.m_Crops = crops;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);

    // Allocate before copying in, execute, then copy the result back out.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    // NOTE(review): the 4-level indexing assumes OutputDim == 4; all current callers
    // instantiate the helper with OutputDim = 4.
    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

} // anonymous namespace
8371
// BatchToSpaceNd (NHWC, Float32): 2x2 block, no crops — 4 batches of 2x2 interleave
// into one 4x4 image.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 2, 2, 1};
    const unsigned int outputShape[] = {1, 4, 4, 1};

    std::vector<float> input({
        // Batch 0, Height 0, Width (2) x Channel (1)
        1.0f, 3.0f,
        // Batch 0, Height 1, Width (2) x Channel (1)
        9.0f, 11.0f,


        // Batch 1, Height 0, Width (2) x Channel (1)
        2.0f, 4.0f,
        // Batch 1, Height 1, Width (2) x Channel (1)
        10.0f, 12.0f,


        // Batch 2, Height 0, Width (2) x Channel (1)
        5.0f, 7.0f,
        // Batch 2, Height 1, Width (2) x Channel (1)
        13.0f, 15.0f,

        // Batch 3, Height 0, Width (2) x Channel (1)
        6.0f, 8.0f,
        // Batch 3, Height 1, Width (2) x Channel (1)
        14.0f, 16.0f
    });

    std::vector<float> expectedOutput({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape {2, 2};
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
8417
// BatchToSpaceNd (NHWC, Float32): degenerate 1x1 spatial input — 4 batches of a single
// value rearrange into one 2x2 image.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 2, 2, 1};

    std::vector<float> input({
        // One value per batch (4 batches x Height (1) x Width (1) x Channel (1))
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
8439
// BatchToSpaceNd (NHWC, Float32): 3 channels per element; with 1x1 spatial input and a
// 2x2 block, the NHWC memory order of the output matches the input order exactly.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
8458
// BatchToSpaceNd (NHWC, Float32): exercises non-zero crops — the leading rows
// introduced by the zero padding (the 0.0f entries) are cropped away ({2, 0} on height).
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test4(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {8, 1, 3, 1};
    const unsigned int outputShape[] = {2, 2, 4, 1};

    std::vector<float> input({
        0.0f, 1.0f, 3.0f,
        0.0f, 9.0f, 11.0f,
        0.0f, 2.0f, 4.0f,
        0.0f, 10.0f, 12.0f,
        0.0f, 5.0f, 7.0f,
        0.0f, 13.0f, 15.0f,
        0.0f, 6.0f, 8.0f,
        0.0f, 14.0f, 16.0f
    });

    std::vector<float> expectedOutput({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape({2, 2});
    // Crop two rows from the top of the height dimension; no width cropping.
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
8491
// BatchToSpaceNd (NCHW, Float32): 2x2 block, no crops, 3 channels — 4 single-pixel
// batches become one 2x2 image per channel.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});

    std::vector<float> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1.0f, 4.0f,
        7.0f, 10.0f,

        // Batch 0, Channel 1, Height (2) x Width (2)
        2.0f, 5.0f,
        8.0f, 11.0f,

        // Batch 0, Channel 2, Height (2) x Width (2)
        3.0f, 6.0f,
        9.0f, 12.0f,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00008522
// BatchToSpaceNd (NCHW, Float32): single channel, degenerate 1x1 spatial input — 4
// batches of one value rearrange into a 2x2 image.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 1, 2, 2};

    std::vector<float> input({
        // One value per batch (4 batches x Channel (1) x Height (1) x Width (1))
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
8544
8545LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test3(
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008546 armnn::IWorkloadFactory& workloadFactory,
8547 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mike Kelly831faed2018-11-28 11:52:08 +00008548{
8549 const unsigned int inputShape[] = {4, 3, 1, 1};
8550 const unsigned int outputShape[] = {1, 3, 2, 2};
8551
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008552 std::vector<float> input({1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f});
Mike Kelly831faed2018-11-28 11:52:08 +00008553
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008554 std::vector<float> expectedOutput({
8555 // Batch 0, Channel 0, Height (2) x Width (2)
8556 1.0f, 7.0f,
8557 2.0f, 8.0f,
Mike Kelly831faed2018-11-28 11:52:08 +00008558
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008559 // Batch 0, Channel 1, Height (2) x Width (2)
8560 3.0f, 9.0f,
8561 4.0f, 10.0f,
Mike Kelly831faed2018-11-28 11:52:08 +00008562
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008563 // Batch 0, Channel 2, Height (2) x Width (2)
8564 5.0f, 11.0f,
8565 6.0f, 12.0f,
8566 });
Mike Kelly831faed2018-11-28 11:52:08 +00008567
8568 std::vector<unsigned int> blockShape({2, 2});
8569 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8570
8571 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
8572 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8573 crops, outputShape, expectedOutput);
8574}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00008575
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008576LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
8577 armnn::IWorkloadFactory& workloadFactory,
8578 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00008579{
8580 const unsigned int inputShape[] = {4, 2, 2, 1};
8581 const unsigned int outputShape[] = {1, 4, 4, 1};
8582
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008583 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
8584 std::vector<uint8_t> expectedOutput({1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00008585
8586 std::vector<unsigned int> blockShape({2, 2});
8587 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8588
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00008589 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
8590 input, blockShape, crops, outputShape, expectedOutput);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008591}
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00008592
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008593LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
8594 armnn::IWorkloadFactory& workloadFactory,
8595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8596{
8597 const unsigned int inputShape[] = {4, 1, 1, 1};
8598 const unsigned int outputShape[] = {1, 2, 2, 1};
8599
8600 std::vector<uint8_t> input({
8601 // Batch 0, Height 0, Width (2) x Channel (1)
8602 1, 2, 3, 4
8603 });
8604
8605 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
8606
8607 std::vector<unsigned int> blockShape({2, 2});
8608 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8609
8610 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8611 armnn::DataLayout::NHWC, inputShape, input, blockShape,
8612 crops, outputShape, expectedOutput);
8613}
8614
8615LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest3(
8616 armnn::IWorkloadFactory& workloadFactory,
8617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8618{
8619 const unsigned int inputShape[] = {4, 1, 1, 3};
8620 const unsigned int outputShape[] = {1, 2, 2, 3};
8621
8622 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
8623
8624 std::vector<uint8_t> expectedOutput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
8625
8626 std::vector<unsigned int> blockShape({2, 2});
8627 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8628
8629 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8630 armnn::DataLayout::NHWC, inputShape, input, blockShape,
8631 crops, outputShape, expectedOutput);
8632}
8633
8634
8635LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest1(
8636 armnn::IWorkloadFactory &workloadFactory,
8637 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8638{
8639 const unsigned int inputShape[] = {4, 3, 1, 1};
8640 const unsigned int outputShape[] = {1, 3, 2, 2};
8641
8642 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
8643
8644 std::vector<uint8_t> expectedOutput({
8645 // Batch 0, Channel 0, Height (2) x Width (2)
8646 1, 4,
8647 7, 10,
8648
8649 // Batch 0, Channel 1, Height (2) x Width (2)
8650 2, 5,
8651 8, 11,
8652
8653 // Batch 0, Channel 2, Height (2) x Width (2)
8654 3, 6,
8655 9, 12,
8656 });
8657
8658 std::vector<unsigned int> blockShape({2, 2});
8659 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8660
8661 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8662 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8663 crops, outputShape, expectedOutput);
8664}
8665
8666LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest2(
8667 armnn::IWorkloadFactory& workloadFactory,
8668 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8669{
8670 const unsigned int inputShape[] = {4, 1, 1, 1};
8671 const unsigned int outputShape[] = {1, 1, 2, 2};
8672
8673 std::vector<uint8_t> input({
8674 // Batch 0, Height 0, Width (2) x Channel (1)
8675 1, 2, 3, 4
8676 });
8677
8678 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
8679
8680 std::vector<unsigned int> blockShape({2, 2});
8681 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8682
8683 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8684 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8685 crops, outputShape, expectedOutput);
8686}
8687
8688LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest3(
8689 armnn::IWorkloadFactory& workloadFactory,
8690 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8691{
8692 const unsigned int inputShape[] = {4, 3, 1, 1};
8693 const unsigned int outputShape[] = {1, 3, 2, 2};
8694
8695 std::vector<uint8_t> input({1, 3, 5, 7, 9, 11, 2, 4, 6, 8, 10, 12});
8696
8697 std::vector<uint8_t> expectedOutput({
8698 // Batch 0, Channel 0, Height (2) x Width (2)
8699 1, 7,
8700 2, 8,
8701
8702 // Batch 0, Channel 1, Height (2) x Width (2)
8703 3, 9,
8704 4, 10,
8705
8706 // Batch 0, Channel 2, Height (2) x Width (2)
8707 5, 11,
8708 6, 12,
8709 });
8710
8711 std::vector<unsigned int> blockShape({2, 2});
8712 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8713
8714 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8715 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8716 crops, outputShape, expectedOutput);
8717}
8718
8719LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest4(
8720 armnn::IWorkloadFactory& workloadFactory,
8721 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8722{
8723 const unsigned int inputShape[] = {8, 1, 1, 3};
8724 const unsigned int outputShape[] = {2, 1, 2, 4};
8725
8726 std::vector<uint8_t> input({
8727 0, 1, 3, 0, 9, 11,
8728 0, 2, 4, 0, 10, 12,
8729 0, 5, 7, 0, 13, 15,
8730 0, 6, 8, 0, 14, 16
8731 });
8732
8733 std::vector<uint8_t> expectedOutput({
8734 1, 2, 3, 4,
8735 5, 6, 7, 8,
8736 9, 10, 11, 12,
8737 13, 14, 15, 16
8738 });
8739
8740 std::vector<unsigned int> blockShape({2, 2});
8741 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};
8742
8743 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8744 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8745 crops, outputShape, expectedOutput);
8746}
8747
//
// StridedSlice, Float32 entry points. Each wrapper only binds the data type
// and forwards to the corresponding templated StridedSlice*Test implementation.
//

LayerTestResult<float, 4> StridedSlice4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// Shrink-axis variant drops a dimension, hence the rank-2 result.
LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8810
//
// StridedSlice, QuantisedAsymm8 (uint8) entry points. Mirrors the Float32
// wrappers above, binding the quantized data type on the same templated
// implementations.
//

LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// Shrink-axis variant drops a dimension, hence the rank-2 result.
LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00008873
//
// Debug layer, Float32 entry points for ranks 4 down to 1. Each wrapper binds
// the data type and forwards to the templated Debug*Test implementation.
//

LayerTestResult<float, 4> Debug4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> Debug3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> Debug2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 1> Debug1DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8901
//
// Debug layer, QuantisedAsymm8 (uint8) entry points for ranks 4 down to 1,
// mirroring the Float32 wrappers above.
//

LayerTestResult<uint8_t, 4> Debug4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> Debug3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> Debug2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Debug1DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Matteo Martincigh49124022019-01-11 13:25:59 +00008929
//
// Gather entry points (Float32 / QuantisedAsymm8). Each wrapper binds the data
// type and forwards to the corresponding templated Gather*TestImpl in
// GatherTestImpl.hpp. The rank of the result tracks the params/indices shape
// combination exercised by each implementation.
//

LayerTestResult<float, 1> Gather1DParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}