blob: ce02fedb983355bdfb8f691487e66b251db7900c [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008#include "TypeUtils.hpp"
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
33#include "ReshapeTestImpl.hpp"
34#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000035#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000036#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
41#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010042#include "LstmTestImpl.hpp"
43#include "ConvertFp16ToFp32TestImpl.hpp"
44#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000045#include "DebugTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000046
// Shared single-batch 3-channel 16x8 input image used by several Conv2d tests.
// Channel 0: a band of 0.5s with one all-zero row; channel 1: a single vertical
// line of 1s at column 2; channel 2: uniformly -1.
static std::vector<float> ConvInput3x8x16({
    // Channel 0.
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    // Channel 1.
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    // Channel 2.
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
74
// Per-output-channel bias (2 channels) shared by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});
77
telsoa01c577f2c2018-08-31 09:22:23 +010078// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000079template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
telsoa014fcda012018-03-09 14:13:49 +000080boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
81{
82 if(biasEnabled)
83 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000084 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +000085 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
86 return bias;
87 }
88 else
89 {
90 return boost::multi_array<T, 1>();
91 }
92}
93
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000094template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000095LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
96 armnn::IWorkloadFactory& workloadFactory,
97 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
98 float qScale,
99 int32_t qOffset,
100 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000101 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000102{
telsoa01c577f2c2018-08-31 09:22:23 +0100103 // Use common single-batch 3-channel 16x8 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000104 armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000105 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
106
telsoa01c577f2c2018-08-31 09:22:23 +0100107 // Use a 2-element batch with 3-channel 3x5 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000108 armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000109 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
110 QuantizedVector<T>(qScale, qOffset, {
111 1, 1, 1,
112 1, -1, 1,
113 1, 1, 1,
114 1, 1, 1,
115 1, 1, 1,
116
117 0, 0, 0,
118 0, 0, 0,
119 0, 0, 0,
120 0, 0, 0,
121 0, 0, 0,
122
123 2, 2, 2,
124 2, 2, 2,
125 2, 2, 2,
126 2, 2, 2,
127 2, 2, 2,
128
129
130 0, 0, 0,
131 0, 0, 0,
132 0, 0, 0,
133 0, 0, 0,
134 0, 0, 0,
135
136 1, 1, 1,
137 1, 1, 1,
138 1, 1, 1,
139 1, 1, 1,
140 1, 1, 1,
141
142 0, 0, 0,
143 0, 0, 0,
144 0, 0, 0,
145 0, 0, 0,
146 0, 0, 0
147 })));
148
telsoa01c577f2c2018-08-31 09:22:23 +0100149 // Expected output is 2 batch elements of a 1-channel 14x4 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000150 armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000151 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
152 QuantizedVector<T>(qScale, qOffset, {
153 -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
154 -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
155 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
156 -23.5f, -23.5f, -23.5f,
157 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
158 -23.5f, -23.5f, -23.5f,
159
160 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
161 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
162 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
163 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
164 })));
165
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000166 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
167 workloadFactory,
168 memoryManager,
169 input,
170 kernel,
171 GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
172 expectedOutput,
173 qScale,
174 qOffset,
175 layout);
telsoa014fcda012018-03-09 14:13:49 +0000176}
177
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000178template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
179 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000180LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
181 armnn::IWorkloadFactory& workloadFactory,
182 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
183 float qScale,
184 int32_t qOffset,
185 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000186 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000187{
telsoa01c577f2c2018-08-31 09:22:23 +0100188 // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
telsoa014fcda012018-03-09 14:13:49 +0000189
telsoa01c577f2c2018-08-31 09:22:23 +0100190 // Use common single-batch 3-channel 16x8 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000191 armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000192 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
193
telsoa01c577f2c2018-08-31 09:22:23 +0100194 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000195 armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000196 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
197 QuantizedVector<T>(qScale, qOffset, {
198 1, 1, 1,
199 1, -1, 1,
200 1, 1, 1,
201
202 0, 0, 0,
203 0, 0, 0,
204 0, 0, 0,
205
206 2, 2, 2,
207 2, 2, 2,
208 2, 2, 2,
209
210
211 0, 0, 0,
212 0, 0, 0,
213 0, 0, 0,
214
215 1, 1, 1,
216 1, 1, 1,
217 1, 1, 1,
218
219 0, 0, 0,
220 0, 0, 0,
221 0, 0, 0
222 })));
223
telsoa01c577f2c2018-08-31 09:22:23 +0100224 // Expected output is 1 batch of a 2-channel 14x6 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000225 armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000226 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
227 QuantizedVector<T>(qScale, qOffset, {
228 -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
229 -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
230 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
231 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
232 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
233 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
234
235 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
236 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
237 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
238 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
239 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
240 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
241 })));
242
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000243 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
244 workloadFactory,
245 memoryManager,
246 input,
247 kernel,
248 GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
249 expectedOutput,
250 qScale,
251 qOffset,
252 layout);
telsoa014fcda012018-03-09 14:13:49 +0000253}
254
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000255template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000256LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
257 armnn::IWorkloadFactory& workloadFactory,
258 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
259 float qScale,
260 int32_t qOffset,
261 bool biasEnabled,
262 armnn::DataLayout dataLayout)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100263{
264 // Use common single-batch 5x5 image.
265
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000266 armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100267 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
268 {
269 1, 5, 2, 3,
270 8, 7, 3, 6,
271 3, 3, 9, 1
272 });
273
274
275 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000276 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100277 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
278 4, 5, 6,
279 0, 0, 0,
280 3, 2, 1
281 });
282
283 // Expected output is 1 batch of a 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000284 armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100285
286 const std::vector<float> outputData =
287 {
288 23, 41, 33, 21,
289 44, 65, 76, 52,
290 82, 85, 79, 42
291 };
292
293 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
294
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000295 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
296 workloadFactory,
297 memoryManager,
298 input,
299 kernel,
300 boost::multi_array<T, 1>(),
301 expectedOutput,
302 dataLayout,
303 qScale,
304 qOffset);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100305}
306
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000307template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly7332ed82018-12-20 17:03:06 +0000308LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
309 armnn::IWorkloadFactory& workloadFactory,
310 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
311 float qScale,
312 int32_t qOffset,
313 bool biasEnabled,
314 const armnn::DataLayout& dataLayout)
315{
316 // Input is a single-batch, 1 channel, 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000317 armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000318 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
319 {
320 1, 5, 2, 3, 5,
321 8, 7, 3, 6, 3,
322 3, 3, 9, 1, 9,
323 4, 1, 8, 1, 3,
324 6, 8, 1, 9, 2
325 });
326
327 // Use a 3x3 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000328 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000329 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
330 {
331 4, 5, 6,
332 0, 0, 0,
333 3, 2, 1
334 });
335
336 // Expected output is a single-batch, 1 channel, 3x3 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000337 armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000338
339 const std::vector<T> outputData =
340 {
341 23, 33, 24,
342 91, 99, 48,
343 26, 50, 19
344 };
345
346 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
347
348 uint32_t padLeft = 1;
349 uint32_t padTop = 1;
350 uint32_t padRight = 1;
351 uint32_t padBottom = 1;
352 uint32_t strideX = 2;
353 uint32_t strideY = 2;
354
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000355 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
356 workloadFactory,
357 memoryManager,
358 input,
359 kernel,
360 boost::multi_array<T, 1>(),
361 expectedOutput,
362 dataLayout,
363 qScale,
364 qOffset,
365 padLeft,
366 padTop,
367 padRight,
368 padBottom,
369 strideX,
370 strideY);
Mike Kelly7332ed82018-12-20 17:03:06 +0000371}
372
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000373LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
374 armnn::IWorkloadFactory& workloadFactory,
375 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
376 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000377 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000378{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000379 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
380 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000381}
382
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000383LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
384 armnn::IWorkloadFactory& workloadFactory,
385 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
386 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000387 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000388{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000389 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
390 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000391}
392
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000393LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
394 armnn::IWorkloadFactory& workloadFactory,
395 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
396 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000397 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000398{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000399 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
400 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000401}
402
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000403LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
404 armnn::IWorkloadFactory& workloadFactory,
405 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
406 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100407{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000408 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
409 workloadFactory,
410 memoryManager,
411 0.f,
412 0,
413 biasEnabled,
414 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100415}
416
Mike Kelly7332ed82018-12-20 17:03:06 +0000417LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
418 armnn::IWorkloadFactory& workloadFactory,
419 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
420 bool biasEnabled,
421 const armnn::DataLayout layout)
422{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000423 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
424 workloadFactory,
425 memoryManager,
426 0.f,
427 0,
428 biasEnabled,
429 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000430}
431
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000432LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
433 armnn::IWorkloadFactory& workloadFactory,
434 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
435 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000436 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000437{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000438 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
439 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000440}
441
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000442template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
443 typename T = armnn::ResolveType<ArmnnType>>
telsoa014fcda012018-03-09 14:13:49 +0000444LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
445 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000447 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000448 float qScale,
449 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000450{
telsoa01c577f2c2018-08-31 09:22:23 +0100451 // Use a single-batch 1-channel 3x3 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000452 armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000453 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
454 QuantizedVector<T>(qScale, qOffset, {
455 11,21,31,
456 12,22,32,
457 13,23,33
458 })));
459
telsoa01c577f2c2018-08-31 09:22:23 +0100460 // Use 1 batch of a 1-channel 2x2 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000461 armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000462 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
463 QuantizedVector<T>(qScale, qOffset, {
464 -11,-21,
465 -12,-22,
466 })));
467
telsoa01c577f2c2018-08-31 09:22:23 +0100468// Expected output is 1 batch of a 1-channel 6x8 image.
telsoa014fcda012018-03-09 14:13:49 +0000469// Manually calculated like this:
470//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
471//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
472//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
473//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
474//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
475//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
476//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000477 armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000478 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
479 QuantizedVector<T>(qScale, qOffset, {
480 0, 0, 0, 0, 0, 0,
481 -242, -594, -934, -372, 0, 0,
482 -495, -1190, -1850, -725, 0, 0,
483 -538, -1256, -1916, -748, 0, 0,
484 -273, -626, -946, -363, 0, 0,
485 0, 0, 0, 0, 0, 0,
486 0, 0, 0, 0, 0, 0,
487 0, 0, 0, 0, 0, 0
488 })));
489
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000490 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
491 workloadFactory,
492 memoryManager,
493 input,
494 kernel,
495 GetBias2<ArmnnBType>(false, qScale, qOffset),
496 expectedOutput,
497 qScale,
498 qOffset,
499 layout,
500 1, // Padding left.
501 2, // Padding top.
502 3, // Padding right.
503 4); // Padding bottom.
telsoa014fcda012018-03-09 14:13:49 +0000504}
505
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000506template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
507 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000508LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
509 armnn::IWorkloadFactory& workloadFactory,
510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000511 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000512 float qScale,
513 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000514{
telsoa01c577f2c2018-08-31 09:22:23 +0100515 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000516 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000517 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
518 QuantizedVector<T>(qScale, qOffset, {
519 11,21,31,41,51,
520 12,22,32,42,52,
521 13,23,33,43,53,
522 14,24,34,44,54,
523 15,25,35,45,55,
524 })));
525
telsoa01c577f2c2018-08-31 09:22:23 +0100526 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000527 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000528 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
529 QuantizedVector<T>(qScale, qOffset, {
530 -11,-21,-31,-41,
531 -12,-22,-32,-42,
532 -13,-23,-33,-43,
533 -14,-24,-34,-44,
534 })));
535
telsoa01c577f2c2018-08-31 09:22:23 +0100536 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000537 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000538 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
539 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
540 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000541 -7140, -10580, -13940, -9300, -5230,
542 -9590, -14120, -18520, -12290, -6860,
543 -9980, -14560, -18960, -12560, -7000,
544 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100545 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000546 })));
547
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000548 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
549 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000550 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000551 input,
552 kernel,
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000553 GetBias2<ArmnnBType>(false, qScale, qOffset),
telsoa014fcda012018-03-09 14:13:49 +0000554 expectedOutput,
555 qScale,
556 qOffset,
narpra015f703182018-10-26 16:24:58 +0100557 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100558 1, // Padding left.
559 1, // Padding top.
560 2, // Padding right.
561 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100562}
563
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000564template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
565 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000566LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
567 armnn::IWorkloadFactory& workloadFactory,
568 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
569 float qScale,
570 int32_t qOffset,
571 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000572 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100573{
telsoa01c577f2c2018-08-31 09:22:23 +0100574 // Use a single-batch 2-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000575 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +0100576 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
577 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
578 0, 1, 2, 3, 4,
579 5, 6, 7, 8, 9,
580 10, 11, 12, 13, 14,
581 15, 16, 17, 18, 19,
582 20, 21, 22, 23, 24,
583
584 25, 26, 27, 28, 29,
585 30, 31, 32, 33, 34,
586 35, 36, 37, 38, 39,
587 40, 41, 42, 43, 44,
588 45, 46, 47, 48, 49
589 })));
590
telsoa01c577f2c2018-08-31 09:22:23 +0100591 // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000592 armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +0100593 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
594 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
595 32, 31, 30, 29,
596 28, 27, 26, 25,
597 24, 23, 22, 21,
598 20, 19, 18, 17,
599
600 16, 15, 14, 13,
601 12, 11, 10, 9,
602 8, 7, 6, 5,
603 4, 3, 2, 1
604 })));
605
telsoa01c577f2c2018-08-31 09:22:23 +0100606 // Expected output is 1 batch of a 2-channel 5x5 image.
607 // Calculated using the python tensorflow library with strideX=1, strideY=1.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000608 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +0100609 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
610 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
611 1062, 1580, 1850, 1530, 1117,
612 2140, 3108, 3500, 2842, 2042,
613 3580, 5068, 5460, 4342, 3062,
614 3618, 5072, 5390, 4248, 2971,
615 3074, 4282, 4510, 3533, 2457,
616 1550, 2284, 2362, 1955, 1428,
617 2910, 4206, 4342, 3528, 2536,
618 3390, 4886, 5022, 4068, 2916,
619 3566, 5056, 5182, 4133, 2922,
620 3100, 4352, 4452, 3517, 2465
621 })));
622
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000623 return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
624 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000625 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +0100626 input,
627 kernel,
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000628 GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
surmeh013537c2c2018-05-18 16:31:43 +0100629 expectedOutput,
630 qScale,
631 qOffset,
jimfly01382a91d2018-10-26 15:55:50 +0100632 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100633 1, // Padding left.
634 1, // Padding top.
635 2, // Padding right.
636 2, // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100637 1, // strideX
638 1); // strideY
telsoa014fcda012018-03-09 14:13:49 +0000639}
640
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000641template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
642 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000643LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
644 armnn::IWorkloadFactory& workloadFactory,
645 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
646 float qScale,
647 int32_t qOffset,
648 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100649{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000650 armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100651 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
652 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
653 0, 25,
654 1, 26,
655 2, 27,
656 3, 28,
657 4, 29,
658
659 5, 30,
660 6, 31,
661 7, 32,
662 8, 33,
663 9, 34,
664
665 10, 35,
666 11, 36,
667 12, 37,
668 13, 38,
669 14, 39,
670
671 15, 40,
672 16, 41,
673 17, 42,
674 18, 43,
675 19, 44,
676
677 20, 45,
678 21, 46,
679 22, 47,
680 23, 48,
681 24, 49
682 })));
683
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000684 armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100685 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
686 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
Matteo Martincigh747ef822018-12-18 09:26:39 +0000687 32, 31, 30, 29,
688 28, 27, 26, 25,
689 24, 23, 22, 21,
690 20, 19, 18, 17,
Nikhil Rajcec6b652018-10-12 13:51:57 +0100691
Matteo Martincigh747ef822018-12-18 09:26:39 +0000692 16, 15, 14, 13,
693 12, 11, 10, 9,
694 8, 7, 6, 5,
695 4, 3, 2, 1
Nikhil Rajcec6b652018-10-12 13:51:57 +0100696 })));
697
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000698 armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100699 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
700 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
701 1062, 1550,
702 1580, 2284,
703 1850, 2362,
704 1530, 1955,
705 1117, 1428,
706
707 2140, 2910,
708 3108, 4206,
709 3500, 4342,
710 2842, 3528,
711 2042, 2536,
712
713 3580, 3390,
714 5068, 4886,
715 5460, 5022,
716 4342, 4068,
717 3062, 2916,
718
719 3618, 3566,
720 5072, 5056,
721 5390, 5182,
722 4248, 4133,
723 2971, 2922,
724
725 3074, 3100,
726 4282, 4352,
727 4510, 4452,
728 3533, 3517,
729 2457, 2465
730 })));
731
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000732 return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
733 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000734 memoryManager,
Nikhil Rajcec6b652018-10-12 13:51:57 +0100735 input,
736 kernel,
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000737 GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
Nikhil Rajcec6b652018-10-12 13:51:57 +0100738 expectedOutput,
739 qScale,
740 qOffset,
741 1, // Padding left.
742 1, // Padding top.
743 2, // Padding right.
744 2, // Padding bottom.
745 1, // strideX
746 1); // strideY
747}
748
telsoa014fcda012018-03-09 14:13:49 +0000749LayerTestResult<float, 4>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000750Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
751 armnn::IWorkloadFactory& workloadFactory,
752 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000753 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000754{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000755 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
756 <armnn::DataType::Float32, armnn::DataType::Float32>(
757 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000758}
759
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000760LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
761 armnn::IWorkloadFactory& workloadFactory,
762 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000763 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000764{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000765 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000766 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000767}
768
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000769LayerTestResult<float, 4> DepthwiseConvolution2dTest(
770 armnn::IWorkloadFactory& workloadFactory,
771 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
772 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000773 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000774{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000775 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000776 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000777}
778
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000779LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
780 armnn::IWorkloadFactory& workloadFactory,
781 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
782 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100783{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000784 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
785 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100786}
787
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000788LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
789 armnn::IWorkloadFactory& workloadFactory,
790 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
791 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000792 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000793{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000794 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000795 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000796}
797
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000798LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
799 armnn::IWorkloadFactory& workloadFactory,
800 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
801 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000802 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100803{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000804 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000805 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +0100806}
807
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000808LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
809 armnn::IWorkloadFactory& workloadFactory,
810 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
811 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000812 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000813{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000814 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000815 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000816}
817
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000818LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
819 armnn::IWorkloadFactory& workloadFactory,
820 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
821 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000822 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000823{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000824 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000825 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000826}
827
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000828LayerTestResult<float, 4> Convolution1dTest(
829 armnn::IWorkloadFactory& workloadFactory,
830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
831 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000832{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000833 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
834 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000835}
836
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000837LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
838 armnn::IWorkloadFactory& workloadFactory,
839 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
840 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000841{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000842 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
843 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000844}
845
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000846LayerTestResult<float,4> CompareConvolution2dTest(
847 armnn::IWorkloadFactory& workloadFactory,
848 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
849 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +0000850{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000851 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
852 workloadFactory, memoryManager, refWorkloadFactory);
telsoa014fcda012018-03-09 14:13:49 +0000853}
854
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000855LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000856 armnn::IWorkloadFactory& workloadFactory,
857 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
858 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +0000859 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000860{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000861 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
862 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +0000863}
864
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000865LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
866 armnn::IWorkloadFactory& workloadFactory,
867 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
868 armnn::IWorkloadFactory& refWorkloadFactory,
869 const armnn::DataLayout layout)
870{
871 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
872 workloadFactory, memoryManager, refWorkloadFactory, layout);
873}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000874
875LayerTestResult<float,4> SimpleNormalizationAcrossTest(
876 armnn::IWorkloadFactory& workloadFactory,
877 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000878{
879 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
880 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000881 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000882}
883
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000884LayerTestResult<float,4> SimpleNormalizationWithinTest(
885 armnn::IWorkloadFactory& workloadFactory,
886 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000887{
888 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
889 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000890 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000891}
892
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000893LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
894 armnn::IWorkloadFactory& workloadFactory,
895 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +0100896{
897 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
898 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000899 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +0100900}
901
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000902LayerTestResult<float,2> SimpleSoftmaxTest(
903 armnn::IWorkloadFactory& workloadFactory,
904 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
905 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000906{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000907 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000908}
909
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000910LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
911 armnn::IWorkloadFactory& workloadFactory,
912 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
913 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000914{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000915 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000916}
917
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000918LayerTestResult<float,4> CompareNormalizationTest(
919 armnn::IWorkloadFactory& workloadFactory,
920 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
921 armnn::IWorkloadFactory& refWorkloadFactory,
922 armnn::NormalizationAlgorithmChannel normChannel,
923 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +0000924{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000925 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000926}
927
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000928LayerTestResult<float,2> CompareSoftmaxTest(
929 armnn::IWorkloadFactory& workloadFactory,
930 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000931 armnn::IWorkloadFactory& refWorkloadFactory,
932 float beta)
933{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000934 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
935 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000936}
937
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000938LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
939 armnn::IWorkloadFactory& workloadFactory,
940 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000941 armnn::IWorkloadFactory& refWorkloadFactory,
942 float beta)
943{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000944 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
945 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000946}
947
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000948std::vector<LayerTestResult<float,3>> SplitterTest(
949 armnn::IWorkloadFactory& workloadFactory,
950 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000951{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000952 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +0000953}
954
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000955std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
956 armnn::IWorkloadFactory& workloadFactory,
957 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000958{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000959 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000960}
961
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000962LayerTestResult<float, 3> CopyViaSplitterTest(
963 armnn::IWorkloadFactory& workloadFactory,
964 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000965{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000966 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000967}
968
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000969LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
970 armnn::IWorkloadFactory& workloadFactory,
971 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000972{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000973 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000974}
975
// Float32 LSTM test: CIFG enabled, peephole connections enabled, no projection.
// Feeds a fixed [2, 2] input and checks against pre-computed golden outputs.
LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: batch size 2, input size 2.
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            { 2., 3., 3., 4. }));

    // Expected output: batch size 2, output size 4.
    // Golden reference values - must not be edited without regenerating them.
    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
        workloadFactory, memoryManager, input, expectedOutput);
}
// Float32 LSTM test: CIFG disabled, peephole connections enabled, projection enabled.
// Feeds a fixed [2, 5] input and checks against pre-computed golden outputs.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: batch size 2, input size 5.
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    // Expected output: batch size 2, (projected) output size 16.
    // Golden reference values - must not be edited without regenerating them.
    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
             0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, memoryManager, input, expectedOutput);
}
1012
// Float32 LSTM test: CIFG disabled, no peephole connections, no projection.
// Feeds a fixed [2, 2] input and checks against pre-computed golden outputs.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: batch size 2, input size 2.
    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {2., 3., 3., 4.}));


    // Expected output: batch size 2, output size 4.
    // Golden reference values - must not be edited without regenerating them.
    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
              -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(
        workloadFactory, memoryManager, input, expectedOutput);
}
1030
// Merges (concatenates) a 2-channel and a 1-channel float32 tensor along the
// channel dimension into a 3-channel output. Where the backend supports
// sub-tensors, the inputs are created as views directly into the output tensor;
// otherwise separate input tensors are used and the merger workload copies them.
// NOTE(review): memoryManager is currently unused by this test.
LayerTestResult<float,3> MergerTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    // Expected result: input1's two channels followed by input2's single channel.
    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    // First input: channels 0-1 of the expected output.
    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    // Second input: channel 2 of the expected output.
    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // If supported, the input handles are sub-tensor views into the output handle
    // at their respective view origins; the merger then needs no explicit copy.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    // Create the workload before allocating/filling handles, then execute and
    // read the result back into ret.output for comparison by the caller.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
1152
// Element-wise addition of two identically-shaped [2, 2, 2, 3] float32 tensors,
// checked against hand-computed expected values.
// NOTE(review): memoryManager is currently unused by this test.
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    // Both inputs and the output share the same shape.
    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    // Expected output: element-wise sum of input1 and input2.
    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Create the workload before allocating/filling handles, then execute and
    // read the result back into ret.output for comparison by the caller.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1243
// Addition test with broadcasting: adds a [1, 3, 2, 1] tensor to a [1, 1, 2, 3]
// tensor, producing a [1, 3, 2, 3] output. Quantization scale/offset are applied
// to all tensors when T is a quantized type, otherwise ignored.
// NOTE(review): memoryManager is currently unused by this test.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    // The two inputs broadcast against each other along different dimensions.
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Only quantized data types carry meaningful scale/offset.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    // Expected output: each input1 element broadcast-added to each input2 row.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Create the workload before allocating/filling handles, then execute and
    // read the result back into ret.output for comparison by the caller.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1321
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001322template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001323LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
1324 armnn::IWorkloadFactory& workloadFactory,
1325 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001326 float qScale,
1327 int32_t qOffset)
1328{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001329 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
1330 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
1331 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001332
1333 if (armnn::IsQuantizedType<T>())
1334 {
1335 inputTensorInfo1.SetQuantizationScale(qScale);
1336 inputTensorInfo1.SetQuantizationOffset(qOffset);
1337 inputTensorInfo2.SetQuantizationScale(qScale);
1338 inputTensorInfo2.SetQuantizationOffset(qOffset);
1339 outputTensorInfo.SetQuantizationScale(qScale);
1340 outputTensorInfo.SetQuantizationOffset(qOffset);
1341 }
1342
1343 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
1344 {
1345 0.0f, 1.0f, 2.0f,
1346 3.0f, 4.0f, 5.0f,
1347 6.0f, 7.0f, 8.0f,
1348 9.0f, 10.0f, 11.0f,
1349 12.0f, 13.0f, 14.0f,
1350 15.0f, 16.0f, 17.0f,
1351 }));
1352
1353 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
1354 {
1355 0.5f,
1356 }));
1357
1358 LayerTestResult<T,4> ret(outputTensorInfo);
1359 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
1360 {
1361 0.5f, 1.5f, 2.5f,
1362 3.5f, 4.5f, 5.5f,
1363 6.5f, 7.5f, 8.5f,
1364 9.5f, 10.5f, 11.5f,
1365 12.5f, 13.5f, 14.5f,
1366 15.5f, 16.5f, 17.5f,
1367 }));
1368
1369 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1370 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1371 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1372
1373 armnn::AdditionQueueDescriptor data;
1374 armnn::WorkloadInfo info;
1375 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1376 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1377 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1378
1379 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1380
1381 inputHandle1->Allocate();
1382 inputHandle2->Allocate();
1383 outputHandle->Allocate();
1384
1385 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1386 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1387
1388 workload->Execute();
1389
1390 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1391
1392 return ret;
1393}
1394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001395LayerTestResult<float, 4> AdditionBroadcastTest(
1396 armnn::IWorkloadFactory& workloadFactory,
1397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001398{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001399 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
1400 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001401}
1402
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001403LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1404 armnn::IWorkloadFactory& workloadFactory,
1405 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001406{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001407 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
1408 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001409}
1410
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001411LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1412 armnn::IWorkloadFactory& workloadFactory,
1413 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001414{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001415 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
1416 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001417}
1418
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001419LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1420 armnn::IWorkloadFactory& workloadFactory,
1421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001422{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001423 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
1424 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001425}
1426
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001427LayerTestResult<float,4> CompareAdditionTest(
1428 armnn::IWorkloadFactory& workloadFactory,
1429 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1430 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00001431{
1432 unsigned int batchSize = 4;
1433 unsigned int channels = 1;
1434 unsigned int height = 2;
1435 unsigned int width = 3;
1436
1437 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1438 armnn::TensorInfo outputTensorInfo;
1439
1440 unsigned int shape[] = {batchSize, channels, height, width};
1441
1442 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1443 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1444 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1445
1446 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
1447 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
1448
1449 LayerTestResult<float,4> ret(outputTensorInfo);
1450
1451 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1452 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1453 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1454
1455 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
1456 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
1457 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1458
1459 armnn::AdditionQueueDescriptor data;
1460 armnn::WorkloadInfo info;
1461 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1462 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1463 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1464
1465 armnn::AdditionQueueDescriptor refData = data;
1466 armnn::WorkloadInfo refInfo = info;
1467 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
1468 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
1469 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1470
1471 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1472 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
1473
1474 inputHandle1->Allocate();
1475 inputHandle2->Allocate();
1476 outputHandle->Allocate();
1477 inputHandle1Ref->Allocate();
1478 inputHandle2Ref->Allocate();
1479 outputHandleRef->Allocate();
1480
1481 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1482 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1483 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
1484 CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
1485
1486 workload->Execute();
1487 workloadRef->Execute();
1488
1489 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1490 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1491
1492 return ret;
1493}
1494
surmeh01bceff2f2018-03-29 16:29:27 +01001495namespace {
David Beck5cd01f32018-09-12 16:00:08 +01001496template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001497LayerTestResult<T, 4> DivisionTestHelper(
1498 armnn::IWorkloadFactory& workloadFactory,
1499 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1500 const unsigned int shape0[4],
1501 const std::vector<T>& values0,
1502 float scale0,
1503 int32_t offset0,
1504 const unsigned int shape1[4],
1505 const std::vector<T> & values1,
1506 float scale1,
1507 int32_t offset1,
1508 const unsigned int outShape[4],
1509 const std::vector<T> & outValues,
1510 float outScale,
1511 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01001512{
1513 auto dataType = (std::is_same<T, uint8_t>::value ?
1514 armnn::DataType::QuantisedAsymm8 :
1515 armnn::DataType::Float32);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001516
David Beck5cd01f32018-09-12 16:00:08 +01001517 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
1518 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
1519 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001520
David Beck5cd01f32018-09-12 16:00:08 +01001521 inputTensorInfo0.SetQuantizationScale(scale0);
1522 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001523
David Beck5cd01f32018-09-12 16:00:08 +01001524 inputTensorInfo1.SetQuantizationScale(scale1);
1525 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001526
David Beck5cd01f32018-09-12 16:00:08 +01001527 outputTensorInfo.SetQuantizationScale(outScale);
1528 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001529
David Beck5cd01f32018-09-12 16:00:08 +01001530 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
1531 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001532
David Beck5cd01f32018-09-12 16:00:08 +01001533 LayerTestResult<T, 4> result(outputTensorInfo);
1534 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001535
David Beck5cd01f32018-09-12 16:00:08 +01001536 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1537 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1538 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001539
David Beck5cd01f32018-09-12 16:00:08 +01001540 armnn::DivisionQueueDescriptor data;
1541 armnn::WorkloadInfo info;
1542 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1543 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1544 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001545
David Beck5cd01f32018-09-12 16:00:08 +01001546 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001547
David Beck5cd01f32018-09-12 16:00:08 +01001548 inputHandle0->Allocate();
1549 inputHandle1->Allocate();
1550 outputHandle->Allocate();
1551
1552 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1553 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1554
David Beck5cd01f32018-09-12 16:00:08 +01001555 workload->Execute();
1556
1557 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
1558
1559 return result;
1560}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001561} // anonymous namespace
1562
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001563LayerTestResult<float,4> DivisionByZeroTest(
1564 armnn::IWorkloadFactory& workloadFactory,
1565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001566{
1567 const unsigned int width = 2;
1568 const unsigned int height = 2;
1569 const unsigned int channelCount = 2;
1570 const unsigned int batchSize = 2;
1571
1572 unsigned int shape[] = { batchSize, channelCount, height, width };
1573
1574 std::vector<float> input0({
1575 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1576 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1577
1578 std::vector<float> input1({
1579 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1580 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1581
1582 std::vector<float> output({
1583 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1584 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1585
David Beck5cd01f32018-09-12 16:00:08 +01001586 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001587 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001588 shape, input0, 1.0f, 0,
1589 shape, input1, 1.0f, 0,
1590 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001591}
1592
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001593LayerTestResult<float,4> DivisionTest(
1594 armnn::IWorkloadFactory& workloadFactory,
1595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001596{
1597 const unsigned int width = 2;
1598 const unsigned int height = 2;
1599 const unsigned int channelCount = 2;
1600 const unsigned int batchSize = 2;
1601
1602 unsigned int shape[] = { batchSize, channelCount, height, width };
1603
1604 std::vector<float> input0({
1605 2, 2, 2, 2, 3, 3, 3, 3,
1606 4, 4, 4, 4, 5, 5, 5, 5 });
1607
1608 std::vector<float> input1({
1609 1, 1, 1, 1, 2, 2, 2, 2,
1610 4, 4, 4, 4, 4, 4, 4, 4 });
1611
1612 std::vector<float> output({
1613 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1614 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1615
David Beck5cd01f32018-09-12 16:00:08 +01001616
1617 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001618 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001619 shape, input0, 1.0f, 0,
1620 shape, input1, 1.0f, 0,
1621 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001622}
1623
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001624LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
1625 armnn::IWorkloadFactory& workloadFactory,
1626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001627{
1628 unsigned int shape0[] = { 1, 2, 2, 2 };
1629 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1630
1631 unsigned int shape1[] = { 1, 1, 1, 1 };
1632 std::vector<float> input1({ 2 });
1633
1634 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1635
David Beck5cd01f32018-09-12 16:00:08 +01001636
1637 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001638 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001639 shape0, input0, 1.0f, 0,
1640 shape1, input1, 1.0f, 0,
1641 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001642}
1643
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001644LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
1645 armnn::IWorkloadFactory& workloadFactory,
1646 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001647{
1648 unsigned int shape0[] = { 1, 3, 3, 2 };
1649 std::vector<float> input0({
1650 1, 4, 3, 8, 5, 12,
1651 7, 16, 9, 20, 11, 24,
1652 13, 28, 15, 32, 17, 36});
1653
1654 unsigned int shape1[] = { 1, 1, 1, 2 };
1655 std::vector<float> input1({ 1, 2 });
1656
1657 std::vector<float> output({
1658 1, 2, 3, 4, 5, 6,
1659 7, 8, 9, 10, 11, 12,
1660 13, 14, 15, 16, 17, 18});
1661
David Beck5cd01f32018-09-12 16:00:08 +01001662 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001663 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001664 shape0, input0, 1.0f, 0,
1665 shape1, input1, 1.0f, 0,
1666 shape0, output, 1.0f, 0);
1667}
1668
1669
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001670LayerTestResult<uint8_t,4> DivisionUint8Test(
1671 armnn::IWorkloadFactory& workloadFactory,
1672 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001673{
1674 const unsigned int width = 2;
1675 const unsigned int height = 2;
1676 const unsigned int channelCount = 2;
1677 const unsigned int batchSize = 2;
1678
1679 unsigned int shape[] = { batchSize, channelCount, height, width };
1680
1681 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1682 4, 4, 4, 4, 5, 5, 5, 5 });
1683
1684 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1685 4, 4, 4, 4, 4, 4, 4, 4 });
1686
1687 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1688 4, 4, 4, 4, 5, 5, 5, 5});
1689
1690
1691 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001692 memoryManager,
1693 shape, input0, 1.0f, 0,
1694 shape, input1, 1.0f, 0,
1695 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001696}
1697
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001698LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
1699 armnn::IWorkloadFactory& workloadFactory,
1700 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001701{
1702 unsigned int shape0[] = { 1, 2, 2, 2 };
1703 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1704
1705 unsigned int shape1[] = { 1, 1, 1, 1 };
1706 std::vector<uint8_t> input1({ 2 });
1707
1708 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1709
1710 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001711 memoryManager,
1712 shape0, input0, 1.0f, 0,
1713 shape1, input1, 1.0f, 0,
1714 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001715}
1716
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001717LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
1718 armnn::IWorkloadFactory& workloadFactory,
1719 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001720{
1721 unsigned int shape0[] = { 1, 3, 3, 2 };
1722 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1723 7, 16, 9, 20, 11, 24,
1724 13, 28, 15, 32, 17, 36});
1725
1726 unsigned int shape1[] = { 1, 1, 1, 2 };
1727 std::vector<uint8_t> input1({ 1, 2 });
1728
1729 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1730 7, 8, 9, 10, 11, 12,
1731 13, 14, 15, 16, 17, 18});
1732
1733 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001734 memoryManager,
1735 shape0, input0, 1.0f, 0,
1736 shape1, input1, 1.0f, 0,
1737 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001738}
1739
Éanna Ó Catháinde705582018-12-03 13:04:22 +00001740template<typename DescriptorType>
1741std::unique_ptr<armnn::IWorkload> CreateWorkload(
1742 const armnn::IWorkloadFactory& workloadFactory,
1743 const armnn::WorkloadInfo& info,
1744 const DescriptorType& descriptor)
1745{
1746 return CreateWorkload(workloadFactory, info, descriptor);
1747};
1748
// Specialization for Maximum: routes to the factory's dedicated entry point.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MaximumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMaximum(descriptor, info);
}
1757
// Specialization for Minimum: routes to the factory's dedicated entry point.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MinimumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMinimum(descriptor, info);
}
1766
// Specialization for Equal: routes to the factory's dedicated entry point.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::EqualQueueDescriptor& descriptor)
{
    return workloadFactory.CreateEqual(descriptor, info);
}
1775
// Specialization for Greater: routes to the factory's dedicated entry point.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::GreaterQueueDescriptor& descriptor)
{
    return workloadFactory.CreateGreater(descriptor, info);
}
1784
namespace {

// Generic driver for binary element-wise workloads (Maximum, Minimum, Equal,
// Greater). Builds the two inputs and the output tensor, runs the workload
// selected by the CreateWorkload<Descriptor> specialization, and returns the
// actual output alongside the caller-supplied expected output.
// The input and output element types may differ (e.g. Float32 in, Boolean
// out for comparison workloads). qScale/qOffset apply to all tensors when
// the input type is quantized.
template <typename Descriptor,
          armnn::DataType ArmnnTypeInput,
          armnn::DataType ArmnnTypeOutput,
          typename TInput = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<TInput> values0,
    const unsigned int shape1[4], std::vector<TInput> values1,
    const unsigned int outShape[4], std::vector<TOutput> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};

    // NOTE(review): the tensors are built before the quantization parameters
    // are set below — presumably MakeTensor only needs shape/type here;
    // confirm if quantization-aware construction is ever required.
    auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);

    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo0.SetQuantizationScale(qScale);
        inputTensorInfo0.SetQuantizationOffset(qOffset);

        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);

        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<TOutput,4> ret(outputTensorInfo);

    // Boolean outputs are compared as 0/non-zero rather than exact values.
    if(ArmnnTypeOutput == armnn::DataType::Boolean)
    {
        ret.compareBoolean = true;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    // Dispatches to the CreateWorkload specialization matching Descriptor.
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
    return ret;
}

// Convenience overload for workloads whose input and output element types are
// the same; forwards to the two-type version with ArmnnT used for both.
template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
LayerTestResult<T, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<T> values0,
    const unsigned int shape1[4], std::vector<T> values1,
    const unsigned int outShape[4], std::vector<T> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
        (workloadFactory,
         memoryManager,
         shape0,
         values0,
         shape1,
         values1,
         outShape,
         outValues,
         qScale,
         qOffset);
}
} // anonymous namespace
1875
1876LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
1877 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001878{
1879 const unsigned int width = 2;
1880 const unsigned int height = 2;
1881 const unsigned int channelCount = 2;
1882 const unsigned int batchSize = 2;
1883
1884 unsigned int shape[] = { batchSize, channelCount, height, width };
1885
1886 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
1887 3, 3, 3, 3, 4, 4, 4, 4 });
1888
1889 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
1890 5, 5, 5, 5, 4, 4, 4, 4 });
1891
kevmay012b4d88e2019-01-24 14:05:09 +00001892 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
1893 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001894
kevmay012b4d88e2019-01-24 14:05:09 +00001895 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001896 workloadFactory,
1897 memoryManager,
1898 shape,
1899 input0,
1900 shape,
1901 input1,
1902 shape,
1903 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001904}
1905
kevmay012b4d88e2019-01-24 14:05:09 +00001906LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001907 armnn::IWorkloadFactory& workloadFactory,
1908 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1909{
1910 unsigned int shape0[] = { 1, 2, 2, 2 };
1911 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1912
1913 unsigned int shape1[] = { 1, 1, 1, 1 };
1914 std::vector<float> input1({ 1 });
1915
kevmay012b4d88e2019-01-24 14:05:09 +00001916 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001917
kevmay012b4d88e2019-01-24 14:05:09 +00001918 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001919 workloadFactory,
1920 memoryManager,
1921 shape0,
1922 input0,
1923 shape1,
1924 input1,
1925 shape0,
1926 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001927}
1928
kevmay012b4d88e2019-01-24 14:05:09 +00001929LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001930 armnn::IWorkloadFactory& workloadFactory,
1931 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1932{
1933 const unsigned int shape0[] = { 1, 2, 2, 3 };
1934 const unsigned int shape1[] = { 1, 1, 1, 3 };
1935
1936 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
1937 7, 8, 9, 10, 11, 12 });
1938
1939 std::vector<float> input1({ 1, 2, 3});
1940
kevmay012b4d88e2019-01-24 14:05:09 +00001941 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
1942 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001943
kevmay012b4d88e2019-01-24 14:05:09 +00001944 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001945 workloadFactory,
1946 memoryManager,
1947 shape0,
1948 input0,
1949 shape1,
1950 input1,
1951 shape0,
1952 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001953}
1954
1955LayerTestResult<uint8_t, 4> EqualUint8Test(
1956 armnn::IWorkloadFactory& workloadFactory,
1957 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1958{
1959 unsigned int shape[] = { 2, 2, 2, 2 };
1960
1961 // See dequantized values to the right.
1962 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00001963 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001964
1965 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
1966 3, 3, 3, 3, 5, 5, 5, 5 });
1967
1968 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
1969 1, 1, 1, 1, 0, 0, 0, 0 });
1970
kevmay012b4d88e2019-01-24 14:05:09 +00001971 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
1972 armnn::DataType::QuantisedAsymm8,
1973 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001974 workloadFactory,
1975 memoryManager,
1976 shape,
1977 input0,
1978 shape,
1979 input1,
1980 shape,
1981 output,
1982 1.0f,
1983 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001984}
1985
1986LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
1987 armnn::IWorkloadFactory& workloadFactory,
1988 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1989{
1990 const unsigned int shape0[] = { 1, 2, 2, 3 };
1991 const unsigned int shape1[] = { 1, 1, 1, 1 };
1992
1993 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
1994 7, 8, 9, 10, 11, 12 });
1995
1996 std::vector<uint8_t> input1({ 1 });
1997
1998 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
1999 0, 0, 0, 0, 0, 0 });
2000
kevmay012b4d88e2019-01-24 14:05:09 +00002001 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2002 armnn::DataType::QuantisedAsymm8,
2003 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002004 workloadFactory,
2005 memoryManager,
2006 shape0,
2007 input0,
2008 shape1,
2009 input1,
2010 shape0,
2011 output,
2012 1.0f,
2013 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002014}
2015
2016LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
2017 armnn::IWorkloadFactory& workloadFactory,
2018 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2019{
2020 const unsigned int shape0[] = { 1, 2, 2, 3 };
2021 const unsigned int shape1[] = { 1, 1, 1, 3 };
2022
2023 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2024 7, 8, 9, 10, 11, 12 });
2025
2026 std::vector<uint8_t> input1({ 1, 1, 3});
2027
2028 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
2029 0, 0, 0, 0, 0, 0 });
2030
kevmay012b4d88e2019-01-24 14:05:09 +00002031 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2032 armnn::DataType::QuantisedAsymm8,
2033 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002034 workloadFactory,
2035 memoryManager,
2036 shape0,
2037 input0,
2038 shape1,
2039 input1,
2040 shape0,
2041 output,
2042 1.0f,
2043 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002044}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002045
kevmay012b4d88e2019-01-24 14:05:09 +00002046LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00002047 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2048{
2049 const unsigned int width = 2;
2050 const unsigned int height = 2;
2051 const unsigned int channelCount = 2;
2052 const unsigned int batchSize = 2;
2053
2054 unsigned int shape[] = { batchSize, channelCount, height, width };
2055
2056 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2057 3, 3, 3, 3, 4, 4, 4, 4 });
2058
2059 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2060 5, 5, 5, 5, 4, 4, 4, 4 });
2061
kevmay012b4d88e2019-01-24 14:05:09 +00002062 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2063 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002064
kevmay012b4d88e2019-01-24 14:05:09 +00002065 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002066 workloadFactory,
2067 memoryManager,
2068 shape,
2069 input0,
2070 shape,
2071 input1,
2072 shape,
2073 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002074}
2075
kevmay012b4d88e2019-01-24 14:05:09 +00002076LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002077 armnn::IWorkloadFactory& workloadFactory,
2078 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2079{
2080 unsigned int shape0[] = { 1, 2, 2, 2 };
2081 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2082
2083 unsigned int shape1[] = { 1, 1, 1, 1 };
2084 std::vector<float> input1({ 1 });
2085
kevmay012b4d88e2019-01-24 14:05:09 +00002086 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00002087
kevmay012b4d88e2019-01-24 14:05:09 +00002088 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002089 workloadFactory,
2090 memoryManager,
2091 shape0,
2092 input0,
2093 shape1,
2094 input1,
2095 shape0,
2096 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002097}
2098
kevmay012b4d88e2019-01-24 14:05:09 +00002099LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002100 armnn::IWorkloadFactory& workloadFactory,
2101 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2102{
2103 const unsigned int shape0[] = { 1, 2, 2, 3 };
2104 const unsigned int shape1[] = { 1, 1, 1, 3 };
2105
2106 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
2107 7, 8, 9, 10, 11, 12 });
2108
2109 std::vector<float> input1({ 1, 3, 2});
2110
kevmay012b4d88e2019-01-24 14:05:09 +00002111 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
2112 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002113
kevmay012b4d88e2019-01-24 14:05:09 +00002114 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002115 workloadFactory,
2116 memoryManager,
2117 shape0,
2118 input0,
2119 shape1,
2120 input1,
2121 shape0,
2122 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002123}
2124
2125LayerTestResult<uint8_t, 4> GreaterUint8Test(
2126 armnn::IWorkloadFactory& workloadFactory,
2127 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2128{
2129 unsigned int shape[] = { 2, 2, 2, 2 };
2130
2131 // See dequantized values to the right.
2132 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2133 3, 3, 3, 3, 5, 5, 5, 5 });
2134
2135 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2136 2, 2, 2, 2, 5, 5, 5, 5 });
2137
2138 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
2139 1, 1, 1, 1, 0, 0, 0, 0 });
2140
kevmay012b4d88e2019-01-24 14:05:09 +00002141 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2142 armnn::DataType::QuantisedAsymm8,
2143 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002144 workloadFactory,
2145 memoryManager,
2146 shape,
2147 input0,
2148 shape,
2149 input1,
2150 shape,
2151 output,
2152 1.0f,
2153 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002154}
2155
2156LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
2157 armnn::IWorkloadFactory& workloadFactory,
2158 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2159{
2160 const unsigned int shape0[] = { 1, 2, 2, 3 };
2161 const unsigned int shape1[] = { 1, 1, 1, 1 };
2162
2163 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2164 7, 8, 9, 10, 11, 12 });
2165
2166 std::vector<uint8_t> input1({ 1 });
2167
2168 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
2169 1, 1, 1, 1, 1, 1 });
2170
kevmay012b4d88e2019-01-24 14:05:09 +00002171 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2172 armnn::DataType::QuantisedAsymm8,
2173 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002174 workloadFactory,
2175 memoryManager,
2176 shape0,
2177 input0,
2178 shape1,
2179 input1,
2180 shape0,
2181 output,
2182 1.0f,
2183 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002184}
2185
2186LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
2187 armnn::IWorkloadFactory& workloadFactory,
2188 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2189{
2190 const unsigned int shape0[] = { 1, 2, 2, 3 };
2191 const unsigned int shape1[] = { 1, 1, 1, 3 };
2192
2193 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2194 7, 8, 9, 10, 11, 12 });
2195
2196 std::vector<uint8_t> input1({ 1, 1, 3});
2197
2198 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
2199 1, 1, 1, 1, 1, 1 });
2200
kevmay012b4d88e2019-01-24 14:05:09 +00002201 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2202 armnn::DataType::QuantisedAsymm8,
2203 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002204 workloadFactory,
2205 memoryManager,
2206 shape0,
2207 input0,
2208 shape1,
2209 input1,
2210 shape0,
2211 output,
2212 1.0f,
2213 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002214}
2215
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002216LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2217 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2218{
2219 const unsigned int width = 2;
2220 const unsigned int height = 2;
2221 const unsigned int channelCount = 2;
2222 const unsigned int batchSize = 2;
2223
2224 unsigned int shape[] = { batchSize, channelCount, height, width };
2225
2226 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2227 3, 3, 3, 3, 4, 4, 4, 4 });
2228
2229 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2230 4, 4, 4, 4, 5, 5, 5, 5 });
2231
2232 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
2233 4, 4, 4, 4, 5, 5, 5, 5 });
2234
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002235 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2236 workloadFactory,
2237 memoryManager,
2238 shape,
2239 input0,
2240 shape,
2241 input1,
2242 shape,
2243 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002244}
2245
2246LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
2247 armnn::IWorkloadFactory& workloadFactory,
2248 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2249{
2250 unsigned int shape0[] = { 1, 2, 2, 2 };
2251 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2252
2253 unsigned int shape1[] = { 1, 1, 1, 1 };
2254 std::vector<float> input1({ 2 });
2255
2256 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
2257
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002258 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2259 workloadFactory,
2260 memoryManager,
2261 shape0,
2262 input0,
2263 shape1,
2264 input1,
2265 shape0,
2266 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002267}
2268
2269LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
2270 armnn::IWorkloadFactory& workloadFactory,
2271 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2272{
2273 const unsigned int shape0[] = { 1, 2, 2, 3 };
2274 const unsigned int shape1[] = { 1, 1, 1, 3 };
2275
2276 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2277 7, 8, 9, 10, 11, 12 });
2278
2279 std::vector<float> input1({ 1, 2, 3});
2280
2281 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002282 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002283
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002284 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2285 workloadFactory,
2286 memoryManager,
2287 shape0,
2288 input0,
2289 shape1,
2290 input1,
2291 shape0,
2292 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002293}
2294
2295LayerTestResult<uint8_t, 4> MaximumUint8Test(
2296 armnn::IWorkloadFactory& workloadFactory,
2297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2298{
2299 unsigned int shape[] = { 2, 2, 2, 2 };
2300
2301 // See dequantized values to the right.
2302 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2303 3, 3, 3, 3, 4, 4, 4, 4 });
2304
2305 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2306 4, 4, 4, 4, 5, 5, 5, 5 });
2307
2308 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2309 4, 4, 4, 4, 5, 5, 5, 5 });
2310
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002311 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2312 workloadFactory,
2313 memoryManager,
2314 shape,
2315 input0,
2316 shape,
2317 input1,
2318 shape,
2319 output,
2320 1.0f,
2321 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002322}
2323
2324LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
2325 armnn::IWorkloadFactory& workloadFactory,
2326 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2327{
2328 const unsigned int shape0[] = { 1, 2, 2, 3 };
2329 const unsigned int shape1[] = { 1, 1, 1, 1 };
2330
2331 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2332 7, 8, 9, 10, 11, 12 });
2333
2334 std::vector<uint8_t> input1({2});
2335
2336 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
2337 7, 8, 9, 10, 11, 12 });
2338
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002339 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2340 workloadFactory,
2341 memoryManager,
2342 shape0,
2343 input0,
2344 shape1,
2345 input1,
2346 shape0,
2347 output,
2348 1.0f,
2349 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002350}
2351
2352LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
2353 armnn::IWorkloadFactory& workloadFactory,
2354 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2355{
2356 const unsigned int shape0[] = { 1, 2, 2, 3 };
2357 const unsigned int shape1[] = { 1, 1, 1, 3 };
2358
2359 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2360 7, 8, 9, 10, 11, 12 });
2361
2362 std::vector<uint8_t> input1({ 1, 10, 3});
2363
2364 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
2365 7, 10, 9, 10, 11, 12 });
2366
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002367 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2368 workloadFactory,
2369 memoryManager,
2370 shape0,
2371 input0,
2372 shape1,
2373 input1,
2374 shape0,
2375 output,
2376 1.0f,
2377 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002378}
2379
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002380LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
2381 armnn::IWorkloadFactory& workloadFactory,
2382 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2383{
2384 unsigned int shape0[] = { 1, 2, 2, 2 };
2385 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2386
2387 unsigned int shape1[] = { 1, 1, 1, 1 };
2388 std::vector<float> input1({ 2 });
2389
2390 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
2391
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002392 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2393 workloadFactory,
2394 memoryManager,
2395 shape0,
2396 input0,
2397 shape1,
2398 input1,
2399 shape0,
2400 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002401}
2402
2403
2404LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
2405 armnn::IWorkloadFactory& workloadFactory,
2406 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2407{
2408 unsigned int shape0[] = { 1, 2, 2, 2 };
2409 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
2410
2411 unsigned int shape1[] = { 1, 1, 1, 1 };
2412 std::vector<float> input1({ 5 });
2413
2414 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
2415
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002416 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2417 workloadFactory,
2418 memoryManager,
2419 shape0,
2420 input0,
2421 shape1,
2422 input1,
2423 shape0,
2424 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002425}
2426
2427LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
2428 armnn::IWorkloadFactory & workloadFactory,
2429 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
2430{
2431 const unsigned int shape0[] = { 1, 2, 2, 3 };
2432 const unsigned int shape1[] = { 1, 1, 1, 3 };
2433
2434 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
2435 7, 1, 2, 3, 4, 5 });
2436
2437 std::vector<uint8_t> input1({ 1, 2, 3});
2438
2439 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
2440 1, 1, 2, 1, 2, 3 });
2441
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002442 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2443 workloadFactory,
2444 memoryManager,
2445 shape0,
2446 input0,
2447 shape1,
2448 input1,
2449 shape0,
2450 output,
2451 1.0f,
2452 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002453}
2454
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002455namespace {
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002456LayerTestResult<float,4> MultiplicationTestHelper(
2457 armnn::IWorkloadFactory& workloadFactory,
2458 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2459 const unsigned int shape0[4],
2460 const std::vector<float> & values0,
2461 const unsigned int shape1[4],
2462 const std::vector<float> & values1,
2463 const unsigned int outShape[4],
2464 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00002465{
surmeh01bceff2f2018-03-29 16:29:27 +01002466 const size_t dimensionCount = 4;
2467 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
2468 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
2469 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00002470
surmeh01bceff2f2018-03-29 16:29:27 +01002471 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
2472 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00002473
2474 LayerTestResult<float,4> ret(outputTensorInfo);
2475
2476 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2477 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2478 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2479
2480 armnn::MultiplicationQueueDescriptor data;
2481 armnn::WorkloadInfo info;
2482 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2483 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2484 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2485
2486 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
2487
2488 inputHandle0->Allocate();
2489 inputHandle1->Allocate();
2490 outputHandle->Allocate();
2491
2492 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2493 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2494
2495 workload->Execute();
2496
2497 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2498
surmeh01bceff2f2018-03-29 16:29:27 +01002499 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00002500 return ret;
2501}
surmeh01bceff2f2018-03-29 16:29:27 +01002502} // anonymous namespace
2503
2504
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002505LayerTestResult<float,4> MultiplicationTest(
2506 armnn::IWorkloadFactory& workloadFactory,
2507 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002508{
2509 const unsigned int width = 2;
2510 const unsigned int height = 2;
2511 const unsigned int channelCount = 2;
2512 const unsigned int batchSize = 2;
2513
2514 unsigned int shape[] = { batchSize, channelCount, height, width };
2515
2516 std::vector<float> input0({
2517 1, 1, 1, 1, 2, 2, 2, 2,
2518 3, 3, 3, 3, 4, 4, 4, 4 });
2519
2520 std::vector<float> input1({
2521 2, 2, 2, 2, 3, 3, 3, 3,
2522 4, 4, 4, 4, 5, 5, 5, 5 });
2523
2524 std::vector<float> output({
2525 2, 2, 2, 2, 6, 6, 6, 6,
2526 12, 12, 12, 12, 20, 20, 20, 20 });
2527
2528 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002529 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002530 shape,
2531 input0,
2532 shape,
2533 input1,
2534 shape,
2535 output);
2536}
2537
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002538LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
2539 armnn::IWorkloadFactory& workloadFactory,
2540 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002541{
2542 unsigned int shape0[] = { 1, 2, 2, 2 };
2543 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2544
2545 unsigned int shape1[] = { 1, 1, 1, 1 };
2546 std::vector<float> input1({ 2 });
2547
2548 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
2549
2550 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002551 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002552 shape0,
2553 input0,
2554 shape1,
2555 input1,
2556 shape0,
2557 output);
2558}
2559
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002560LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
2561 armnn::IWorkloadFactory& workloadFactory,
2562 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002563{
2564 unsigned int shape0[] = { 1, 3, 3, 2 };
2565 std::vector<float> input0({
2566 1, 2, 3, 4, 5, 6,
2567 7, 8, 9, 10, 11, 12,
2568 13, 14, 15, 16, 17, 18});
2569
2570 unsigned int shape1[] = { 1, 1, 1, 2 };
2571 std::vector<float> input1({ 1, 2 });
2572
2573 std::vector<float> output({
2574 1, 4, 3, 8, 5, 12,
2575 7, 16, 9, 20, 11, 24,
2576 13, 28, 15, 32, 17, 36});
2577
2578 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002579 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002580 shape0,
2581 input0,
2582 shape1,
2583 input1,
2584 shape0,
2585 output);
2586}
telsoa014fcda012018-03-09 14:13:49 +00002587
// Runs the same Multiplication workload on two workload factories (the backend
// under test and a reference backend) over identical random inputs.  The
// returned result stores the test backend's output in 'output' and the
// reference backend's output in 'outputExpected' so the caller can compare
// them for equivalence.
// NOTE(review): memoryManager is currently unused here — tensor handles come
// straight from the factories; confirm this is intentional.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the comparison deterministic across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    // Handles for the backend under test...
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // ...and the corresponding handles for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference workload reuses the same descriptor/info, rebound to the
    // reference backend's handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive byte-identical input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
2656
// Runs the same BatchNormalization workload on the backend under test and on a
// reference backend, with identical random input/mean/variance/beta/gamma
// tensors, and returns both outputs ('output' = test backend,
// 'outputExpected' = reference backend) for the caller to compare.
// NOTE(review): memoryManager is currently unused here — confirm intentional.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    // The per-channel parameter tensors (mean/variance/beta/gamma) are 1D.
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds keep the comparison deterministic; variance uses a 0.0f
    // lower bound since negative variances are not meaningful.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    // Parameter tensors are passed to the workload as constant CPU tensors.
    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference workload shares the descriptor (including the parameter
    // tensors), rebound to the reference backend's input/output handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive byte-identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
2737
// Permutes 'inputData' (described by 'inputTensorInfo') according to
// 'mappings' by running a Permute workload on the given factory.
// On exit, 'outputData' holds the permuted values and 'inputTensorInfo' is
// overwritten with the permuted tensor's info (in/out parameter).
// NOTE(review): memoryManager is currently unused here — confirm intentional.
template<typename T>
void PermuteTensorData(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::PermutationVector& mappings,
        armnn::TensorInfo & inputTensorInfo,
        const T * inputData,
        std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->Execute();

    // Resize the destination buffer to fit the permuted tensor before copy-out.
    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Report the permuted shape back to the caller.
    inputTensorInfo = outputTensorInfo;
}
2780
2781armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
2782 const std::vector<armnn::TensorInfo> & inputTensorInfos,
2783 unsigned int concatDim)
2784{
telsoa014fcda012018-03-09 14:13:49 +00002785 std::vector<armnn::TensorShape> shapes;
2786 shapes.reserve(inputTensorInfos.size());
2787 for (const armnn::TensorInfo& it: inputTensorInfos)
2788 {
2789 shapes.push_back(it.GetShape());
2790 }
surmeh013537c2c2018-05-18 16:31:43 +01002791
2792 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
2793 shapes.end(),
2794 concatDim);
2795}
2796
//
// Concatenation is only supported for N and C dimensions for NCHW and the inner most dimension.
// In case of <4 dimensions we need to make sure that the concat dimensions are at least
// the 3rd slowest iterating one or the inner most dimension.
//

// Returns true when the inputs must be permuted before the concatenation along
// 'concatDim' can be performed on one of the supported axes (see note above).
bool NeedPermuteForConcat(
    const std::vector<armnn::TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    // Tensors with fewer than 3 dimensions always need a permute; for exactly
    // 3 dimensions only a concat along the middle axis (concatDim == 1,
    // i.e. nDimensions - concatDim == 2) requires one.
    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
2828
2829armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
2830{
2831 unsigned int numDims = inputShape.GetNumDimensions();
2832 if (numDims >= 3)
2833 {
2834 // Nothing to do if the inputShape has at least 3 dimensions.
2835 return inputShape;
2836 }
2837
2838 std::vector<unsigned int> newDims(size_t(3), 1u);
2839 unsigned int expandedBy = 3 - numDims;
2840 for (unsigned int i=0; i<numDims; ++i)
2841 {
2842 newDims[expandedBy+i] = inputShape[i];
2843 }
2844 return armnn::TensorShape(3u, &newDims[0]);
2845}
2846
2847void Generate3dPermuteVectorForConcat(
2848 unsigned int numDimensions,
2849 unsigned int & concatDim,
2850 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
2851{
2852 BOOST_ASSERT_MSG(numDimensions <= 3,
2853 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01002854 unsigned int expandedBy = 3 - numDimensions;
2855 unsigned int expandedConcatAxis = concatDim + expandedBy;
2856
2857 if (expandedConcatAxis == 2)
2858 {
2859 concatDim = 0;
2860 armnn::PermutationVector forwardPermutation({1, 2, 0});
2861 armnn::PermutationVector reversePermutation({2, 0, 1});
2862 permutations = std::make_pair(forwardPermutation, reversePermutation);
2863 }
2864 else if (expandedConcatAxis == 1)
2865 {
2866 concatDim = 0;
2867 armnn::PermutationVector forwardPermutation({2, 0, 1});
2868 armnn::PermutationVector reversePermutation({1, 2, 0});
2869 permutations = std::make_pair(forwardPermutation, reversePermutation);
2870 }
2871 else
2872 {
2873 BOOST_ASSERT(expandedConcatAxis == 0);
2874 concatDim = 0;
2875 }
2876}
2877
//
// Permute the input tensors so we can do a supported concatenation.
// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
// at the front. Finally this function tells what the output shape
// of the permuted concatenated tensor is going to be.
//
// In/out parameters: inputTensorInfos and inputData are rewritten in place to
// their permuted 3D equivalents (inputDataStorage owns the permuted copies),
// permuteVector receives the reverse permutation needed to undo the transform
// on the output, concatDim is rewritten to the post-permutation axis (0), and
// outputTensorInfo's shape is replaced by the permuted 3D output shape.
//
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input decides the rank and the permutation pair;
            // all remaining inputs must have the same rank.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation (used later to undo the transform).
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Pad the shape up to 3D, then physically permute the data into
        // inputDataStorage[nthInput] so the concat axis becomes axis 0.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        // Redirect the caller's pointer/info to the permuted copy.
        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    // Report the shape the permuted, concatenated output will have.
    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
2946
2947
//
// This is the pair of PermuteInputsForConcat(...) which permutes back
// the output of the concatenation so we can check it against an expected
// output.
//
// Reads the concatenated result out of inputDataHandle, applies permuteVector
// (the reverse permutation stored by PermuteInputsForConcat) and copies the
// un-permuted result into the caller-provided buffer `data`, which must be
// large enough for tensorInfo.GetNumElements() elements.
//
template <typename T>
void PermuteOutputForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo & tensorInfo,
    const armnn::PermutationVector & permuteVector,
    std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
    T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    // Pull the raw concatenated data off the (possibly device-side) handle.
    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    // Undo the permutation applied to the inputs; outputData is resized/filled
    // by the helper.
    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}
2986
2987template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002988void Concatenate(
2989 armnn::IWorkloadFactory& workloadFactory,
2990 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2991 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
2992 std::initializer_list<T *> inputsOrig,
2993 const armnn::TensorInfo& outputTensorInfoOrig,
2994 T * output,
narpra015cdda352018-11-19 15:30:27 +00002995 unsigned int concatDim,
2996 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01002997{
2998 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
2999 if (output == nullptr)
3000 {
3001 // Nullptr is an error in the test. By returning without doing the permutation
3002 // I expect the caller to fail the test. It still makes sense to report this as
3003 // an assert for Debug builds.
3004 return;
3005 }
3006
telsoa01c577f2c2018-08-31 09:22:23 +01003007 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01003008 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
3009 std::vector<T *> inputs = inputsOrig;
3010 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
3011
3012 armnn::PermutationVector permuteVector{0, 1, 2};
3013
telsoa01c577f2c2018-08-31 09:22:23 +01003014 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01003015 std::vector<std::vector<T>> tmpInputDataStorage;
3016
3017 const size_t inputCount = inputTensorInfos.size();
3018
3019 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
3020
3021 if (needPermuteForConcat)
3022 {
3023 //
3024 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01003025 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01003026 //
3027 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003028 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003029 inputTensorInfos,
3030 inputs,
3031 tmpInputDataStorage,
3032 permuteVector,
3033 concatDim,
3034 outputTensorInfo);
3035 }
3036
narpra015cdda352018-11-19 15:30:27 +00003037 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00003038
3039 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
3040 inputHandles.reserve(inputCount);
3041
narpra015cdda352018-11-19 15:30:27 +00003042 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3043
3044 armnn::MergerQueueDescriptor queueDescriptor;
3045 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
3046 queueDescriptor.m_Parameters = viewsDescriptor;
3047
3048 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003049 {
narpra015cdda352018-11-19 15:30:27 +00003050 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
3051 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
3052 {
3053 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
3054 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
3055 }
telsoa014fcda012018-03-09 14:13:49 +00003056
narpra015cdda352018-11-19 15:30:27 +00003057 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00003058
narpra015cdda352018-11-19 15:30:27 +00003059 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
3060 for (unsigned int i = 0; i < inputCount; ++i)
3061 {
3062 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
3063 std::unique_ptr<armnn::ITensorHandle> inputHandle =
3064 subTensorsSupported ?
3065 workloadFactory.CreateSubTensorHandle(*outputHandle,
3066 inputTensorInfo.GetShape(),
3067 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
3068 workloadFactory.CreateTensorHandle(inputTensorInfo);
3069
3070 inputHandles.emplace_back(std::move(inputHandle));
3071 }
3072
telsoa014fcda012018-03-09 14:13:49 +00003073 }
narpra015cdda352018-11-19 15:30:27 +00003074 else
3075 {
3076 for (unsigned int i = 0; i < inputCount; ++i)
3077 {
3078 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
3079 inputHandles.emplace_back(std::move(inputHandle));
3080 }
3081 }
telsoa014fcda012018-03-09 14:13:49 +00003082
3083 for (unsigned int i = 0; i < inputCount; ++i)
3084 {
surmeh013537c2c2018-05-18 16:31:43 +01003085 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00003086 }
3087
3088 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
3089
3090 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
3091
3092 for (auto& inputHandle : inputHandles)
3093 {
3094 inputHandle->Allocate();
3095 }
3096
3097 outputHandle->Allocate();
3098
3099 unsigned int nextInputId = 0;
3100 for (auto& inputHandle : inputHandles)
3101 {
surmeh013537c2c2018-05-18 16:31:43 +01003102 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
3103 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00003104 }
3105
3106 workload->Execute();
3107
surmeh013537c2c2018-05-18 16:31:43 +01003108 if (needPermuteForConcat)
3109 {
3110 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003111 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003112 outputTensorInfo,
3113 permuteVector,
3114 std::move(outputHandle),
3115 output);
3116 }
3117 else
3118 {
3119 CopyDataFromITensorHandle(output, outputHandle.get());
3120 }
telsoa014fcda012018-03-09 14:13:49 +00003121}
3122
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003123template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003124LayerTestResult<T, 1> Concatenation1dTestImpl(
3125 armnn::IWorkloadFactory& workloadFactory,
3126 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3127 float qScale,
3128 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003129{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003130 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003131
3132 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
3133 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
3134 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
3135
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003136 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003137
3138 LayerTestResult<T, 1> result(outputTensorInfo);
3139
3140 std::vector<T> output;
3141 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003142 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003143 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
3144 { input0.data(), input1.data(), input2.data() },
3145 outputTensorInfo,
3146 output.data(),
3147 0,
3148 true);
telsoa014fcda012018-03-09 14:13:49 +00003149
3150 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
3151 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3152 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
3153 }));
3154
3155 return result;
3156}
3157
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003158LayerTestResult<float, 1> Concatenation1dTest(
3159 armnn::IWorkloadFactory& workloadFactory,
3160 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003161{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003162 return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003163}
3164
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003165template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003166LayerTestResult<T, 2> Concatenation2dTestImpl(
3167 armnn::IWorkloadFactory& workloadFactory,
3168 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00003169 const armnn::TensorInfo& outputTensorInfo,
3170 unsigned int dimension,
3171 const float qScale,
3172 const int32_t qOffset)
3173{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003174 armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003175
3176 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3177 // Batch 0
3178 1.0f, 2.0f, 3.0f,
3179
3180 // Batch 1
3181 10.0f, 11.0f, 12.0f,
3182 }));
3183
3184 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3185 // Batch 0
3186 4.0f, 5.0f, 6.0f,
3187
3188 // Batch 1
3189 13.0f, 14.0f, 15.0f,
3190 }));
3191
3192 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3193 // Batch 0
3194 7.0f, 8.0f, 9.0f,
3195
3196 // Batch 1
3197 16.0f, 17.0f, 18.0f,
3198 }));
3199
3200 LayerTestResult<T, 2> result(outputTensorInfo);
3201
3202 std::vector<T> output;
3203 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003204 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003205 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
3206 { input0.data(), input1.data(), input2.data() },
3207 outputTensorInfo,
3208 output.data(),
3209 dimension,
3210 true);
telsoa014fcda012018-03-09 14:13:49 +00003211
3212 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
3213 return result;
3214}
3215
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003216template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003217LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
3218 armnn::IWorkloadFactory& workloadFactory,
3219 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3220 float qScale,
3221 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003222{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003223 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003224
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003225 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
3226 workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
3227
telsoa014fcda012018-03-09 14:13:49 +00003228 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3229 // Batch 0
3230 1.0f, 2.0f, 3.0f,
3231
3232 // Batch 1
3233 10.0f, 11.0f, 12.0f,
3234
3235 // Batch 2
3236 4.0f, 5.0f, 6.0f,
3237
3238 // Batch 3
3239 13.0f, 14.0f, 15.0f,
3240
3241 // Batch 4
3242 7.0f, 8.0f, 9.0f,
3243
3244 // Batch 5
3245 16.0f, 17.0f, 18.0f,
3246 }));
3247
3248 return result;
3249}
3250
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003251LayerTestResult<float, 2> Concatenation2dDim0Test(
3252 armnn::IWorkloadFactory& workloadFactory,
3253 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003254{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003255 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003256}
3257
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003258template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003259LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
3260 armnn::IWorkloadFactory& workloadFactory,
3261 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3262 float qScale,
3263 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003264{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003265 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003266
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003267 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
3268 workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
3269
telsoa014fcda012018-03-09 14:13:49 +00003270 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3271 // Batch 0
3272 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
3273
3274 // Batch 1
3275 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
3276 }));
3277
3278 return result;
3279}
3280
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003281LayerTestResult<float, 2> Concatenation2dDim1Test(
3282 armnn::IWorkloadFactory& workloadFactory,
3283 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003284{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003285 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003286}
3287
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003288template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003289LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
3290 armnn::IWorkloadFactory& workloadFactory,
3291 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3292 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003293 int32_t qOffset)
3294{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003295 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003296 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3297 // Batch 0
3298 1.0f, 2.0f, 3.0f,
3299
3300 // Batch 1
3301 10.0f, 11.0f, 12.0f,
3302 }));
3303
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003304 armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003305 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3306 // Batch 0
3307 4.0f, 5.0f, 6.0f,
3308
3309 // Batch 1
3310 13.0f, 14.0f, 15.0f,
3311
3312 // Batch 0
3313 7.0f, 8.0f, 9.0f,
3314 }));
3315
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003316 armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003317 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3318 // Batch 1
3319 16.0f, 17.0f, 18.0f,
3320 }));
3321
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003322 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003323 LayerTestResult<T, 2> result(outputTensorInfo);
3324
3325 std::vector<T> output;
3326 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003327 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003328 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3329 { input0.data(), input1.data(), input2.data() },
3330 outputTensorInfo,
3331 output.data(),
3332 0,
3333 true);
telsoa014fcda012018-03-09 14:13:49 +00003334
3335 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
3336 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3337 // Batch 0
3338 1.0f, 2.0f, 3.0f,
3339
3340 // Batch 1
3341 10.0f, 11.0f, 12.0f,
3342
3343 // Batch 2
3344 4.0f, 5.0f, 6.0f,
3345
3346 // Batch 3
3347 13.0f, 14.0f, 15.0f,
3348
3349 // Batch 4
3350 7.0f, 8.0f, 9.0f,
3351
3352 // Batch 5
3353 16.0f, 17.0f, 18.0f,
3354 }));
3355
3356 return result;
3357}
3358
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003359LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
3360 armnn::IWorkloadFactory& workloadFactory,
3361 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003362{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003363 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
3364 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003365}
3366
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003367template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003368LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
3369 armnn::IWorkloadFactory& workloadFactory,
3370 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3371 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003372 int32_t qOffset)
3373{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003374 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003375 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3376 // Batch 0
3377 1.0f, 2.0f, 3.0f,
3378
3379 // Batch 1
3380 10.0f, 11.0f, 12.0f,
3381 }));
3382
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003383 armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003384 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3385 // Batch 0
3386 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
3387
3388 // Batch 1
3389 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
3390 }));
3391
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003392 armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003393 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3394 // Batch 0
3395 9.0f,
3396
3397 // Batch 1
3398 18.0f
3399 }));
3400
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003401 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003402 LayerTestResult<T, 2> result(outputTensorInfo);
3403
3404 std::vector<T> output;
3405 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003406 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003407 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3408 { input0.data(), input1.data(), input2.data() },
3409 outputTensorInfo,
3410 output.data(),
3411 1,
3412 true);
telsoa014fcda012018-03-09 14:13:49 +00003413
3414 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
3415 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3416 // Batch 0
3417 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
3418
3419 // Batch 1
3420 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
3421 }));
3422
3423 return result;
3424}
3425
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003426LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
3427 armnn::IWorkloadFactory& workloadFactory,
3428 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003429{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003430 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
3431 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003432}
3433
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003434template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003435LayerTestResult<T, 3> Concatenation3dTestImpl(
3436 armnn::IWorkloadFactory& workloadFactory,
3437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00003438 const armnn::TensorInfo& outputTensorInfo,
3439 unsigned int dimension,
narpra015cdda352018-11-19 15:30:27 +00003440 bool useSubtensor,
telsoa014fcda012018-03-09 14:13:49 +00003441 float qScale,
3442 int32_t qOffset)
3443{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003444 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003445
3446 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3447 // Batch 0, Channel 0
3448 1.0f, 2.0f,
3449
3450 // Batch 0, Channel 1
3451 3.0f, 4.0f,
3452
3453 // Batch 0, Channel 2
3454 5.0f, 6.0f,
3455
3456 // Batch 1, Channel 0
3457 19.0f, 20.0f,
3458
3459 // Batch 1, Channel 1
3460 21.0f, 22.0f,
3461
3462 // Batch 1, Channel 2
3463 23.0f, 24.0f
3464 }));
3465
3466 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3467 // Batch 0, Channel 0
3468 7.0f, 8.0f,
3469
3470 // Batch 0, Channel 1
3471 9.0f, 10.0f,
3472
3473 // Batch 0, Channel 2
3474 11.0f, 12.0f,
3475
3476 // Batch 1, Channel 0
3477 25.0f, 26.0f,
3478
3479 // Batch 1, Channel 1
3480 27.0f, 28.0f,
3481
3482 // Batch 1, Channel 2
3483 29.0f, 30.0f
3484 }));
3485
3486 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3487 // Batch 0, Channel 0
3488 13.0f, 14.0f,
3489
3490 // Batch 0, Channel 1
3491 15.0f, 16.0f,
3492
3493 // Batch 0, Channel 2
3494 17.0f, 18.0f,
3495
3496 // Batch 1, Channel 0
3497 31.0f, 32.0f,
3498
3499 // Batch 1, Channel 1
3500 33.0f, 34.0f,
3501
3502 // Batch 1, Channel 2
3503 35.0f, 36.0f
3504 }));
3505
3506 LayerTestResult<T, 3> result(outputTensorInfo);
3507
3508 std::vector<T> output;
3509 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003510 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003511 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
3512 { input0.data(), input1.data(), input2.data() },
3513 outputTensorInfo,
3514 output.data(),
3515 dimension,
3516 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00003517
3518 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
3519 return result;
3520}
3521
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003522template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003523LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
3524 armnn::IWorkloadFactory& workloadFactory,
3525 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3526 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003527 int32_t qOffset)
3528{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003529 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003530
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003531 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
3532 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
3533
telsoa014fcda012018-03-09 14:13:49 +00003534 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3535 // Batch 0, Channel 0
3536 1.0f, 2.0f,
3537
3538 // Batch 0, Channel 1
3539 3.0f, 4.0f,
3540
3541 // Batch 0, Channel 2
3542 5.0f, 6.0f,
3543
3544 // Batch 1, Channel 0
3545 19.0f, 20.0f,
3546
3547 // Batch 1, Channel 1
3548 21.0f, 22.0f,
3549
3550 // Batch 1, Channel 2
3551 23.0f, 24.0f,
3552
3553 // Batch 2, Channel 0
3554 7.0f, 8.0f,
3555
3556 // Batch 2, Channel 1
3557 9.0f, 10.0f,
3558
3559 // Batch 2, Channel 2
3560 11.0f, 12.0f,
3561
3562 // Batch 3, Channel 0
3563 25.0f, 26.0f,
3564
3565 // Batch 3, Channel 1
3566 27.0f, 28.0f,
3567
3568 // Batch 3, Channel 2
3569 29.0f, 30.0f,
3570
3571 // Batch 4, Channel 0
3572 13.0f, 14.0f,
3573
3574 // Batch 4, Channel 1
3575 15.0f, 16.0f,
3576
3577 // Batch 4, Channel 2
3578 17.0f, 18.0f,
3579
3580 // Batch 5, Channel 0
3581 31.0f, 32.0f,
3582
3583 // Batch 5, Channel 1
3584 33.0f, 34.0f,
3585
3586 // Batch 5, Channel 2
3587 35.0f, 36.0f
3588 }));
narpra015cdda352018-11-19 15:30:27 +00003589
telsoa014fcda012018-03-09 14:13:49 +00003590 return result;
3591}
3592
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003593LayerTestResult<float, 3> Concatenation3dDim0Test(
3594 armnn::IWorkloadFactory& workloadFactory,
3595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003596{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003597 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003598}
3599
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003600template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003601LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
3602 armnn::IWorkloadFactory& workloadFactory,
3603 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3604 float qScale,
3605 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003606{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003607 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003608
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003609 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
3610 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00003611
telsoa014fcda012018-03-09 14:13:49 +00003612 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3613 // Batch 0, Channel 0
3614 1.0f, 2.0f,
3615
3616 // Batch 0, Channel 1
3617 3.0f, 4.0f,
3618
3619 // Batch 0, Channel 2
3620 5.0f, 6.0f,
3621
3622 // Batch 0, Channel 3
3623 7.0f, 8.0f,
3624
3625 // Batch 0, Channel 4
3626 9.0f, 10.0f,
3627
3628 // Batch 0, Channel 5
3629 11.0f, 12.0f,
3630
3631 // Batch 0, Channel 6
3632 13.0f, 14.0f,
3633
3634 // Batch 0, Channel 7
3635 15.0f, 16.0f,
3636
3637 // Batch 0, Channel 8
3638 17.0f, 18.0f,
3639
3640 // Batch 1, Channel 0
3641 19.0f, 20.0f,
3642
3643 // Batch 1, Channel 1
3644 21.0f, 22.0f,
3645
3646 // Batch 1, Channel 2
3647 23.0f, 24.0f,
3648
3649 // Batch 1, Channel 3
3650 25.0f, 26.0f,
3651
3652 // Batch 1, Channel 4
3653 27.0f, 28.0f,
3654
3655 // Batch 1, Channel 5
3656 29.0f, 30.0f,
3657
3658 // Batch 1, Channel 6
3659 31.0f, 32.0f,
3660
3661 // Batch 1, Channel 7
3662 33.0f, 34.0f,
3663
3664 // Batch 1, Channel 8
3665 35.0f, 36.0f
3666 }));
3667
3668 return result;
3669}
3670
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003671LayerTestResult<float, 3> Concatenation3dDim1Test(
3672 armnn::IWorkloadFactory& workloadFactory,
3673 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003674{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003675 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003676}
3677
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003678template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003679LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
3680 armnn::IWorkloadFactory& workloadFactory,
3681 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003682 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003683 float qScale,
3684 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003685{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003686 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003687
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003688 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
3689 workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00003690
telsoa014fcda012018-03-09 14:13:49 +00003691 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3692 // Batch 0, Channel 0
3693 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
3694
3695 // Batch 0, Channel 1
3696 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
3697
3698 // Batch 0, Channel 2
3699 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
3700
3701 // Batch 1, Channel 0
3702 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
3703
3704 // Batch 1, Channel 1
3705 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
3706
3707 // Batch 1, Channel 2
3708 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
3709 }));
3710
3711 return result;
3712}
3713
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003714LayerTestResult<float, 3> Concatenation3dDim2Test(
3715 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00003716 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3717 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003718{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003719 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
3720 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003721}
3722
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003723template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003724LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
3725 armnn::IWorkloadFactory& workloadFactory,
3726 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3727 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003728 int32_t qOffset)
3729{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003730 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003731 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3732 // Batch 0, Channel 0
3733 1.0f, 2.0f,
3734
3735 // Batch 0, Channel 1
3736 3.0f, 4.0f,
3737
3738 // Batch 0, Channel 2
3739 5.0f, 6.0f,
3740
3741 // Batch 1, Channel 0
3742 19.0f, 20.0f,
3743
3744 // Batch 1, Channel 1
3745 21.0f, 22.0f,
3746
3747 // Batch 1, Channel 2
3748 23.0f, 24.0f
3749 }));
3750
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003751 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003752 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3753 // Batch 0, Channel 0
3754 7.0f, 8.0f,
3755
3756 // Batch 0, Channel 1
3757 9.0f, 10.0f,
3758
3759 // Batch 0, Channel 2
3760 11.0f, 12.0f,
3761 }));
3762
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003763 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003764 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3765 // Batch 0, Channel 0
3766 25.0f, 26.0f,
3767
3768 // Batch 0, Channel 1
3769 27.0f, 28.0f,
3770
3771 // Batch 0, Channel 2
3772 29.0f, 30.0f,
3773
3774 // Batch 1, Channel 0
3775 13.0f, 14.0f,
3776
3777 // Batch 1, Channel 1
3778 15.0f, 16.0f,
3779
3780 // Batch 1, Channel 2
3781 17.0f, 18.0f,
3782
3783 // Batch 2, Channel 0
3784 31.0f, 32.0f,
3785
3786 // Batch 2, Channel 1
3787 33.0f, 34.0f,
3788
3789 // Batch 2, Channel 2
3790 35.0f, 36.0f
3791 }));
3792
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003793 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003794 LayerTestResult<T, 3> result(outputTensorInfo);
3795
3796 std::vector<T> output;
3797 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003798 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003799 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3800 { input0.data(), input1.data(), input2.data() },
3801 outputTensorInfo,
3802 output.data(),
3803 0,
3804 true);
telsoa014fcda012018-03-09 14:13:49 +00003805
3806 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
3807 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3808 // Batch 0, Channel 0
3809 1.0f, 2.0f,
3810
3811 // Batch 0, Channel 1
3812 3.0f, 4.0f,
3813
3814 // Batch 0, Channel 2
3815 5.0f, 6.0f,
3816
3817 // Batch 1, Channel 0
3818 19.0f, 20.0f,
3819
3820 // Batch 1, Channel 1
3821 21.0f, 22.0f,
3822
3823 // Batch 1, Channel 2
3824 23.0f, 24.0f,
3825
3826 // Batch 2, Channel 0
3827 7.0f, 8.0f,
3828
3829 // Batch 2, Channel 1
3830 9.0f, 10.0f,
3831
3832 // Batch 2, Channel 2
3833 11.0f, 12.0f,
3834
3835 // Batch 3, Channel 0
3836 25.0f, 26.0f,
3837
3838 // Batch 3, Channel 1
3839 27.0f, 28.0f,
3840
3841 // Batch 3, Channel 2
3842 29.0f, 30.0f,
3843
3844 // Batch 4, Channel 0
3845 13.0f, 14.0f,
3846
3847 // Batch 4, Channel 1
3848 15.0f, 16.0f,
3849
3850 // Batch 4, Channel 2
3851 17.0f, 18.0f,
3852
3853 // Batch 5, Channel 0
3854 31.0f, 32.0f,
3855
3856 // Batch 5, Channel 1
3857 33.0f, 34.0f,
3858
3859 // Batch 5, Channel 2
3860 35.0f, 36.0f
3861 }));
3862
3863 return result;
3864}
3865
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003866LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
3867 armnn::IWorkloadFactory& workloadFactory,
3868 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003869{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003870 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
3871 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003872}
3873
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003874template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003875LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
3876 armnn::IWorkloadFactory& workloadFactory,
3877 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3878 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003879 int32_t qOffset)
3880{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003881 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003882 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3883 // Batch 0, Channel 0
3884 1.0f, 2.0f,
3885
3886 // Batch 0, Channel 1
3887 3.0f, 4.0f,
3888
3889 // Batch 0, Channel 2
3890 5.0f, 6.0f,
3891
3892 // Batch 1, Channel 0
3893 19.0f, 20.0f,
3894
3895 // Batch 1, Channel 1
3896 21.0f, 22.0f,
3897
3898 // Batch 1, Channel 2
3899 23.0f, 24.0f
3900 }));
3901
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003902 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003903 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3904 // Batch 0, Channel 0
3905 7.0f, 8.0f,
3906
3907 // Batch 0, Channel 1
3908 9.0f, 10.0f,
3909
3910 // Batch 0, Channel 2
3911 11.0f, 12.0f,
3912
3913 // Batch 0, Channel 3
3914 25.0f, 26.0f,
3915
3916 // Batch 1, Channel 0
3917 27.0f, 28.0f,
3918
3919 // Batch 1, Channel 1
3920 29.0f, 30.0f,
3921
3922 // Batch 1, Channel 2
3923 13.0f, 14.0f,
3924
3925 // Batch 1, Channel 3
3926 15.0f, 16.0f,
3927 }));
3928
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003929 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003930 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3931 // Batch 0, Channel 0
3932 17.0f, 18.0f,
3933
3934 // Batch 1, Channel 0
3935 31.0f, 32.0f,
3936 }));
3937
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003938 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003939 LayerTestResult<T, 3> result(outputTensorInfo);
3940
3941 std::vector<T> output;
3942 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003943 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003944 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3945 { input0.data(), input1.data(), input2.data() },
3946 outputTensorInfo,
3947 output.data(),
3948 1,
3949 true);
telsoa014fcda012018-03-09 14:13:49 +00003950
3951 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
3952 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3953 // Batch 0, Channel 0
3954 1.0f, 2.0f,
3955
3956 // Batch 0, Channel 1
3957 3.0f, 4.0f,
3958
3959 // Batch 0, Channel 2
3960 5.0f, 6.0f,
3961
3962 // Batch 0, Channel 3
3963 7.0f, 8.0f,
3964
3965 // Batch 0, Channel 4
3966 9.0f, 10.0f,
3967
3968 // Batch 0, Channel 5
3969 11.0f, 12.0f,
3970
3971 // Batch 0, Channel 6
3972 25.0f, 26.0f,
3973
3974 // Batch 0, Channel 7
3975 17.0f, 18.0f,
3976
3977 // Batch 1, Channel 0
3978 19.0f, 20.0f,
3979
3980 // Batch 1, Channel 1
3981 21.0f, 22.0f,
3982
3983 // Batch 1, Channel 2
3984 23.0f, 24.0f,
3985
3986 // Batch 1, Channel 3
3987 27.0f, 28.0f,
3988
3989 // Batch 1, Channel 4
3990 29.0f, 30.0f,
3991
3992 // Batch 1, Channel 5
3993 13.0f, 14.0f,
3994
3995 // Batch 1, Channel 6
3996 15.0f, 16.0f,
3997
3998 // Batch 1, Channel 7
3999 31.0f, 32.0f,
4000 }));
4001
4002 return result;
4003}
4004
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004005LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
4006 armnn::IWorkloadFactory& workloadFactory,
4007 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004008{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004009 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4010 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004011}
4012
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004013template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004014LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
4015 armnn::IWorkloadFactory& workloadFactory,
4016 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004017 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004018 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004019 int32_t qOffset)
4020{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004021 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004022 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4023 // Batch 0, Channel 0
4024 1.0f, 2.0f,
4025
4026 // Batch 0, Channel 1
4027 3.0f, 4.0f,
4028
4029 // Batch 0, Channel 2
4030 5.0f, 6.0f,
4031
4032 // Batch 1, Channel 0
4033 19.0f, 20.0f,
4034
4035 // Batch 1, Channel 1
4036 21.0f, 22.0f,
4037
4038 // Batch 1, Channel 2
4039 23.0f, 24.0f
4040 }));
4041
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004042 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004043 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4044 // Batch 0, Channel 0
4045 7.0f,
4046
4047 // Batch 0, Channel 1
4048 9.0f,
4049
4050 // Batch 0, Channel 2
4051 11.0f,
4052
4053 // Batch 1, Channel 0
4054 25.0f,
4055
4056 // Batch 1, Channel 1
4057 27.0f,
4058
4059 // Batch 1, Channel 2
4060 29.0f
4061 }));
4062
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004063 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004064 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4065 // Batch 0, Channel 0
4066 13.0f, 14.0f, 50.0f,
4067
4068 // Batch 0, Channel 1
4069 15.0f, 16.0f, 51.0f,
4070
4071 // Batch 0, Channel 2
4072 17.0f, 18.0f, 52.0f,
4073
4074 // Batch 1, Channel 0
4075 31.0f, 32.0f, 53.0f,
4076
4077 // Batch 1, Channel 1
4078 33.0f, 34.0f, 54.0f,
4079
4080 // Batch 1, Channel 2
4081 35.0f, 36.0f, 55.0f,
4082 }));
4083
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004084 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004085 LayerTestResult<T, 3> result(outputTensorInfo);
4086
4087 std::vector<T> output;
4088 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004089 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004090 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4091 { input0.data(), input1.data(), input2.data() },
4092 outputTensorInfo,
4093 output.data(),
4094 2,
4095 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004096
4097 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4098 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4099 // Batch 0, Channel 0
4100 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
4101
4102 // Batch 0, Channel 1
4103 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
4104
4105 // Batch 0, Channel 2
4106 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
4107
4108 // Batch 1, Channel 0
4109 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
4110
4111 // Batch 1, Channel 1
4112 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
4113
4114 // Batch 1, Channel 2
4115 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
4116 }));
4117
4118 return result;
4119}
4120
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004121LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
4122 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004123 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4124 bool useSubtensor)
4125{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004126 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
4127 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004128}
4129
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004130template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004131LayerTestResult<T, 4> Concatenation4dTestImpl(
4132 armnn::IWorkloadFactory& workloadFactory,
4133 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4134 const armnn::TensorInfo& outputTensorInfo,
4135 unsigned int dimension,
4136 bool useSubtensor,
4137 float qScale,
4138 int32_t qOffset)
4139{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004140 armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004141
4142 auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4143 1.0f, 2.0f,
4144 3.0f, 4.0f,
4145 5.0f, 6.0f,
4146 7.0f, 8.0f,
4147 9.0f, 10.0f,
4148 11.0f, 12.0f
4149 }));
4150
4151 auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4152 11.0f, 12.0f,
4153 13.0f, 14.0f,
4154 15.0f, 16.0f,
4155 17.0f, 18.0f,
4156 19.0f, 20.0f,
4157 21.0f, 22.0f
4158 }));
4159
4160 auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4161 21.0f, 22.0f,
4162 23.0f, 24.0f,
4163 25.0f, 26.0f,
4164 27.0f, 28.0f,
4165 29.0f, 30.0f,
4166 31.0f, 32.0f
4167 }));
4168
4169 LayerTestResult<T, 4> result(outputTensorInfo);
4170
4171 std::vector<T> output;
4172 output.resize(outputTensorInfo.GetNumElements());
4173
4174 Concatenate<T>(workloadFactory,
4175 memoryManager,
4176 {inputTensorInfo, inputTensorInfo, inputTensorInfo},
4177 {input0.data(), input1.data(), input2.data()},
4178 outputTensorInfo,
4179 output.data(),
4180 dimension,
4181 useSubtensor);
4182
4183 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4184 return result;
4185}
4186
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004187template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004188LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
4189 armnn::IWorkloadFactory& workloadFactory,
4190 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4191 float qScale,
4192 int32_t qOffset)
4193{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004194 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004195
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004196 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4197 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
4198
narpra015cdda352018-11-19 15:30:27 +00004199 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4200 1.0f, 2.0f,
4201 3.0f, 4.0f,
4202 5.0f, 6.0f,
4203 7.0f, 8.0f,
4204 9.0f, 10.0f,
4205 11.0f, 12.0f,
4206
4207 11.0f, 12.0f,
4208 13.0f, 14.0f,
4209 15.0f, 16.0f,
4210 17.0f, 18.0f,
4211 19.0f, 20.0f,
4212 21.0f, 22.0f,
4213
4214 21.0f, 22.0f,
4215 23.0f, 24.0f,
4216 25.0f, 26.0f,
4217 27.0f, 28.0f,
4218 29.0f, 30.0f,
4219 31.0f, 32.0f
4220 }));
4221 return result;
4222}
4223
4224LayerTestResult<float, 4> Concatenation4dDim0Test(
4225 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004226 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004227{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004228 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004229}
4230
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004231template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004232LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
4233 armnn::IWorkloadFactory& workloadFactory,
4234 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4235 float qScale,
4236 int32_t qOffset)
4237{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004238 armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004239
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004240 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4241 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
4242
narpra015cdda352018-11-19 15:30:27 +00004243 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4244 1.0f, 2.0f,
4245 3.0f, 4.0f,
4246 5.0f, 6.0f,
4247 7.0f, 8.0f,
4248 9.0f, 10.0f,
4249 11.0f, 12.0f,
4250
4251 11.0f, 12.0f,
4252 13.0f, 14.0f,
4253 15.0f, 16.0f,
4254 17.0f, 18.0f,
4255 19.0f, 20.0f,
4256 21.0f, 22.0f,
4257
4258 21.0f, 22.0f,
4259 23.0f, 24.0f,
4260 25.0f, 26.0f,
4261 27.0f, 28.0f,
4262 29.0f, 30.0f,
4263 31.0f, 32.0f
4264 }));
4265
4266 return result;
4267}
4268
4269LayerTestResult<float, 4> Concatenation4dDim1Test(
4270 armnn::IWorkloadFactory& workloadFactory,
4271 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4272{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004273 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004274}
4275
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004276template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004277LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
4278 armnn::IWorkloadFactory& workloadFactory,
4279 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4280 float qScale,
4281 int32_t qOffset)
4282{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004283 armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004284
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004285 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4286 workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
4287
narpra015cdda352018-11-19 15:30:27 +00004288 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4289 1.0f, 2.0f,
4290 3.0f, 4.0f,
4291 11.0f, 12.0f,
4292 13.0f, 14.0f,
4293 21.0f, 22.0f,
4294 23.0f, 24.0f,
4295
4296 5.0f, 6.0f,
4297 7.0f, 8.0f,
4298 15.0f, 16.0f,
4299 17.0f, 18.0f,
4300 25.0f, 26.0f,
4301 27.0f, 28.0f,
4302
4303 9.0f, 10.0f,
4304 11.0f, 12.0f,
4305 19.0f, 20.0f,
4306 21.0f, 22.0f,
4307 29.0f, 30.0f,
4308 31.0f, 32.0f
4309 }));
4310
4311 return result;
4312}
4313
4314LayerTestResult<float, 4> Concatenation4dDim2Test(
4315 armnn::IWorkloadFactory& workloadFactory,
4316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4317{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004318 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004319}
4320
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004321template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004322LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
4323 armnn::IWorkloadFactory& workloadFactory,
4324 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4325 float qScale,
4326 int32_t qOffset,
4327 bool useSubtensor)
4328{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004329 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004330
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004331 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4332 workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
4333
narpra015cdda352018-11-19 15:30:27 +00004334 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4335 1.0f, 2.0f,
4336 11.0f, 12.0f,
4337 21.0f, 22.0f,
4338 3.0f, 4.0f,
4339 13.0f, 14.0f,
4340 23.0f, 24.0f,
4341
4342 5.0f, 6.0f,
4343 15.0f, 16.0f,
4344 25.0f, 26.0f,
4345 7.0f, 8.0f,
4346 17.0f, 18.0f,
4347 27.0f, 28.0f,
4348
4349 9.0f, 10.0f,
4350 19.0f, 20.0f,
4351 29.0f, 30.0f,
4352 11.0f, 12.0f,
4353 21.0f, 22.0f,
4354 31.0f, 32.0f
4355 }));
4356
4357 return result;
4358}
4359
4360LayerTestResult<float, 4> Concatenation4dDim3Test(
4361 armnn::IWorkloadFactory& workloadFactory,
4362 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4363 bool useSubtensor)
4364{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004365 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
4366 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00004367}
4368
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004369template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004370LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
4371 armnn::IWorkloadFactory& workloadFactory,
4372 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4373 float qScale,
4374 int32_t qOffset)
4375{
4376 unsigned int dimension = 0;
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004377 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004378
4379 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4380 1.0f, 2.0f,
4381 3.0f, 4.0f,
4382 5.0f, 6.0f,
4383 7.0f, 8.0f,
4384 9.0f, 10.0f,
4385 11.0f, 12.0f
4386 }));
4387
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004388 armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004389
4390 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4391 11.0f, 12.0f,
4392 13.0f, 14.0f,
4393 15.0f, 16.0f,
4394 17.0f, 18.0f,
4395 19.0f, 20.0f,
4396 21.0f, 22.0f,
4397
4398 21.0f, 22.0f,
4399 23.0f, 24.0f,
4400 25.0f, 26.0f,
4401 27.0f, 28.0f,
4402 29.0f, 30.0f,
4403 31.0f, 32.0f
4404
4405 }));
4406
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004407 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004408
4409 LayerTestResult<T, 4> result(outputTensorInfo);
4410
4411 std::vector<T> output;
4412 output.resize(outputTensorInfo.GetNumElements());
4413 Concatenate<T>(workloadFactory,
4414 memoryManager,
4415 {inputTensorInfo0, inputTensorInfo1},
4416 {input0.data(), input1.data()},
4417 outputTensorInfo,
4418 output.data(),
4419 dimension,
4420 true);
4421
4422 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4423 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4424 1.0f, 2.0f,
4425 3.0f, 4.0f,
4426 5.0f, 6.0f,
4427 7.0f, 8.0f,
4428 9.0f, 10.0f,
4429 11.0f, 12.0f,
4430
4431 11.0f, 12.0f,
4432 13.0f, 14.0f,
4433 15.0f, 16.0f,
4434 17.0f, 18.0f,
4435 19.0f, 20.0f,
4436 21.0f, 22.0f,
4437
4438 21.0f, 22.0f,
4439 23.0f, 24.0f,
4440 25.0f, 26.0f,
4441 27.0f, 28.0f,
4442 29.0f, 30.0f,
4443 31.0f, 32.0f
4444 }));
4445
4446 return result;
4447}
4448
4449LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
4450 armnn::IWorkloadFactory& workloadFactory,
4451 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4452{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004453 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
4454 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004455}
4456
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004457template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004458LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
4459 armnn::IWorkloadFactory& workloadFactory,
4460 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4461 float qScale,
4462 int32_t qOffset)
4463{
4464 unsigned int dimension = 1;
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004465 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004466
4467 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4468 1.0f, 2.0f,
4469 3.0f, 4.0f,
4470 5.0f, 6.0f,
4471 7.0f, 8.0f,
4472 9.0f, 10.0f,
4473 11.0f, 12.0f
4474 }));
4475
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004476 armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004477
4478 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4479 11.0f, 12.0f,
4480 13.0f, 14.0f,
4481 15.0f, 16.0f,
4482 17.0f, 18.0f,
4483
4484 }));
4485
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004486 armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004487
4488 LayerTestResult<T, 4> result(outputTensorInfo);
4489
4490 std::vector<T> output;
4491 output.resize(outputTensorInfo.GetNumElements());
4492 Concatenate<T>(workloadFactory,
4493 memoryManager,
4494 {inputTensorInfo0, inputTensorInfo1},
4495 {input0.data(), input1.data()},
4496 outputTensorInfo,
4497 output.data(),
4498 dimension,
4499 true);
4500
4501 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4502 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4503 1.0f, 2.0f,
4504 3.0f, 4.0f,
4505 5.0f, 6.0f,
4506 7.0f, 8.0f,
4507 9.0f, 10.0f,
4508 11.0f, 12.0f,
4509 11.0f, 12.0f,
4510 13.0f, 14.0f,
4511 15.0f, 16.0f,
4512 17.0f, 18.0f
4513 }));
4514
4515 return result;
4516}
4517
4518LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
4519 armnn::IWorkloadFactory& workloadFactory,
4520 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4521{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004522 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
4523 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004524}
4525
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004526template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004527LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
4528 armnn::IWorkloadFactory& workloadFactory,
4529 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4530 float qScale,
4531 int32_t qOffset)
4532{
4533 unsigned int dimension = 2;
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004534 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004535
4536 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4537 1.0f, 2.0f,
4538 3.0f, 4.0f,
4539 5.0f, 6.0f,
4540 7.0f, 8.0f,
4541 9.0f, 10.0f,
4542 11.0f, 12.0f
4543 }));
4544
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004545 armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004546
4547 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4548 11.0f, 12.0f,
4549 13.0f, 14.0f,
4550 15.0f, 16.0f,
4551 17.0f, 18.0f,
4552 19.0f, 20.0f,
4553 21.0f, 22.0f,
4554 23.0f, 24.0f,
4555 25.0f, 26.0f,
4556 27.0f, 28.0f
4557 }));
4558
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004559 armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004560
4561 LayerTestResult<T, 4> result(outputTensorInfo);
4562
4563 std::vector<T> output;
4564 output.resize(outputTensorInfo.GetNumElements());
4565 Concatenate<T>(workloadFactory,
4566 memoryManager,
4567 {inputTensorInfo0, inputTensorInfo1},
4568 {input0.data(), input1.data()},
4569 outputTensorInfo,
4570 output.data(),
4571 dimension,
4572 true);
4573
4574 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4575 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4576 1.0f, 2.0f,
4577 3.0f, 4.0f,
4578 11.0f, 12.0f,
4579 13.0f, 14.0f,
4580 15.0f, 16.0f,
4581
4582 5.0f, 6.0f,
4583 7.0f, 8.0f,
4584 17.0f, 18.0f,
4585 19.0f, 20.0f,
4586 21.0f, 22.0f,
4587
4588 9.0f, 10.0f,
4589 11.0f, 12.0f,
4590 23.0f, 24.0f,
4591 25.0f, 26.0f,
4592 27.0f, 28.0f
4593 }));
4594
4595 return result;
4596}
4597
4598LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
4599 armnn::IWorkloadFactory& workloadFactory,
4600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4601{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004602 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
4603 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004604}
4605
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004606template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004607LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
4608 armnn::IWorkloadFactory& workloadFactory,
4609 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4610 float qScale,
4611 int32_t qOffset,
4612 bool useSubtensor)
4613{
4614 unsigned int dimension = 3;
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004615 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004616
4617 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4618 1.0f, 2.0f,
4619 3.0f, 4.0f,
4620 5.0f, 6.0f,
4621 7.0f, 8.0f,
4622 9.0f, 10.0f,
4623 11.0f, 12.0f
4624 }));
4625
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004626 armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004627
4628 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4629 11.0f, 12.0f, 13.0f,
4630 14.0f, 15.0f, 16.0f,
4631
4632 17.0f, 18.0f, 19.0f,
4633 20.0f, 21.0f, 22.0f,
4634
4635 23.0f, 24.0f, 25.0f,
4636 26.0f, 27.0f, 28.0f
4637 }));
4638
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004639 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004640
4641 LayerTestResult<T, 4> result(outputTensorInfo);
4642
4643 std::vector<T> output;
4644 output.resize(outputTensorInfo.GetNumElements());
4645 Concatenate<T>(workloadFactory,
4646 memoryManager,
4647 {inputTensorInfo0, inputTensorInfo1},
4648 {input0.data(), input1.data()},
4649 outputTensorInfo,
4650 output.data(),
4651 dimension,
4652 useSubtensor);
4653
4654 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4655 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4656 1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
4657 3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
4658 5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
4659 7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
4660 9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
4661 11.0f, 12.0f, 26.0f, 27.0f, 28.0f
4662 }));
4663
4664 return result;
4665}
4666
4667LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
4668 armnn::IWorkloadFactory& workloadFactory,
4669 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4670 bool useSubtensor)
4671{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004672 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
4673 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004674}
4675
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004676LayerTestResult<float, 4> ResizeBilinearNopTest(
4677 armnn::IWorkloadFactory& workloadFactory,
4678 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004679 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004680{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004681 const armnn::TensorInfo inputTensorInfo =
4682 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
4683
4684 const armnn::TensorInfo outputTensorInfo =
4685 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00004686
James Conroy6b965822018-11-01 11:33:09 +00004687 std::vector<float> inputData({
4688 1.0f, 2.0f, 3.0f, 4.0f,
4689 2.0f, 3.0f, 4.0f, 5.0f,
4690 3.0f, 4.0f, 5.0f, 6.0f,
4691 4.0f, 5.0f, 6.0f, 7.0f,
4692
telsoa014fcda012018-03-09 14:13:49 +00004693 1.0f, 2.0f, 3.0f, 4.0f,
4694 2.0f, 3.0f, 4.0f, 5.0f,
4695 3.0f, 4.0f, 5.0f, 6.0f,
4696 4.0f, 5.0f, 6.0f, 7.0f
James Conroy6b965822018-11-01 11:33:09 +00004697 });
4698
4699 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004700 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004701 {
4702 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004703 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004704 inputData = tmp;
4705 }
4706
4707 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004708
4709 LayerTestResult<float, 4> result(outputTensorInfo);
4710 result.outputExpected = input;
4711
4712 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4713 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4714
4715 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004716 descriptor.m_Parameters.m_DataLayout = dataLayout;
4717 armnn::WorkloadInfo info;
4718 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4719 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4720
4721 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4722
4723 inputHandle->Allocate();
4724 outputHandle->Allocate();
4725 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4726
James Conroy074f3712018-10-03 09:32:03 +01004727 workload->Execute();
4728
4729 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4730 return result;
4731}
4732
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004733LayerTestResult<float, 4> SimpleResizeBilinearTest(
4734 armnn::IWorkloadFactory& workloadFactory,
4735 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004736 const armnn::DataLayout dataLayout)
James Conroy074f3712018-10-03 09:32:03 +01004737{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004738 const armnn::TensorInfo inputTensorInfo =
4739 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
4740
4741 const armnn::TensorInfo outputTensorInfo =
4742 armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);
James Conroy074f3712018-10-03 09:32:03 +01004743
James Conroy6b965822018-11-01 11:33:09 +00004744 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004745 1.0f, 255.0f,
James Conroy6b965822018-11-01 11:33:09 +00004746 200.0f, 250.0f,
4747
4748 250.0f, 200.0f,
4749 250.0f, 1.0f
4750 });
James Conroy074f3712018-10-03 09:32:03 +01004751
4752 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
4753 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
James Conroy6b965822018-11-01 11:33:09 +00004754 // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
4755 // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
4756 // which we would expect if projecting the centre).
4757
4758 std::vector<float> outputData({
4759 1.0f,
4760
4761 250.0f
4762 });
4763
4764 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004765 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004766 {
4767 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004768 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004769 inputData = tmp;
4770
4771 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004772 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004773 outputData = tmp1;
4774 }
4775
4776 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
4777
James Conroy074f3712018-10-03 09:32:03 +01004778 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004779 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
James Conroy074f3712018-10-03 09:32:03 +01004780
4781 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4782 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4783
4784 armnn::ResizeBilinearQueueDescriptor descriptor;
4785 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004786 armnn::WorkloadInfo info;
4787 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4788 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4789
4790 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4791
4792 inputHandle->Allocate();
4793 outputHandle->Allocate();
4794 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4795
4796 workload->Execute();
4797
4798 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4799 return result;
4800}
4801
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004802LayerTestResult<float, 4> ResizeBilinearSqMinTest(
4803 armnn::IWorkloadFactory& workloadFactory,
4804 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004805 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004806{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004807 const armnn::TensorInfo inputTensorInfo =
4808 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
4809
4810 const armnn::TensorInfo outputTensorInfo =
4811 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00004812
James Conroy6b965822018-11-01 11:33:09 +00004813 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004814 1.0f, 2.0f, 3.0f, 4.0f,
4815 2.0f, 3.0f, 4.0f, 5.0f,
4816 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00004817 4.0f, 5.0f, 6.0f, 7.0f,
4818
4819 7.0f, 6.0f, 5.0f, 4.0f,
4820 6.0f, 5.0f, 4.0f, 3.0f,
4821 5.0f, 4.0f, 3.0f, 2.0f,
4822 4.0f, 3.0f, 2.0f, 1.0f
4823 });
4824
4825 std::vector<float> outputData({
4826 1.0f, 3.0f,
4827 3.0f, 5.0f,
4828
4829 7.0f, 5.0f,
4830 5.0f, 3.0f
4831 });
4832
4833 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004834 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004835 {
4836 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004837 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004838 inputData = tmp;
4839
4840 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004841 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004842 outputData = tmp1;
4843 }
4844
4845 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004846
telsoa014fcda012018-03-09 14:13:49 +00004847 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004848 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004849
4850 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4851 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4852
4853 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004854 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004855 armnn::WorkloadInfo info;
4856 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4857 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4858
4859 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4860
4861 inputHandle->Allocate();
4862 outputHandle->Allocate();
4863 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4864
4865 workload->Execute();
4866
4867 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4868 return result;
4869}
4870
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004871LayerTestResult<float, 4> ResizeBilinearMinTest(
4872 armnn::IWorkloadFactory& workloadFactory,
4873 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004874 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004875{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004876 const armnn::TensorInfo inputTensorInfo =
4877 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
4878
4879 const armnn::TensorInfo outputTensorInfo =
4880 armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00004881
James Conroy6b965822018-11-01 11:33:09 +00004882 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004883 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
4884 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00004885 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
4886
4887 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
4888 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
4889 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
4890 });
4891
4892 std::vector<float> outputData({
4893 1.0f, 2.6666f, 6.00f,
4894 78.5f, 179.3333f, 401.00f,
4895
4896 987.0f, 454.6670f, 203.33f,
4897 48.5f, 22.3333f, 10.00f
4898 });
4899
4900 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004901 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004902 {
4903 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004904 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004905 inputData = tmp;
4906
4907 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004908 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004909 outputData = tmp1;
4910 }
4911
4912 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004913
4914 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004915 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004916
4917 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4918 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4919
4920 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004921 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004922 armnn::WorkloadInfo info;
4923 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4924 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4925
4926 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4927
4928 inputHandle->Allocate();
4929 outputHandle->Allocate();
4930 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4931
4932 workload->Execute();
4933
4934 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4935 return result;
4936}
4937
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004938LayerTestResult<float, 4> ResizeBilinearMagTest(
4939 armnn::IWorkloadFactory& workloadFactory,
4940 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004941 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004942{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004943 const armnn::TensorInfo inputTensorInfo =
4944 armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);
4945
4946 const armnn::TensorInfo outputTensorInfo =
4947 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00004948
James Conroy6b965822018-11-01 11:33:09 +00004949 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004950 1.0f, 2.0f,
4951 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00004952 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00004953
James Conroy6b965822018-11-01 11:33:09 +00004954 233.0f, 144.0f,
4955 21.0f, 13.0f,
4956 2.0f, 1.0f
4957 });
4958
4959 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01004960 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
4961 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00004962 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
4963
4964 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
4965 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
4966 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
4967 });
4968
4969 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004970 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004971 {
4972 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004973 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004974 inputData = tmp;
4975
4976 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004977 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004978 outputData = tmp1;
4979 }
4980
4981 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
4982
4983 LayerTestResult<float, 4> result(outputTensorInfo);
4984 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004985
4986 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4987 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4988
4989 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004990 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004991 armnn::WorkloadInfo info;
4992 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4993 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4994
4995 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4996
4997 inputHandle->Allocate();
4998 outputHandle->Allocate();
4999 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5000
5001 workload->Execute();
5002
5003 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5004 return result;
5005}
5006
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005007LayerTestResult<float, 2> FakeQuantizationTest(
5008 armnn::IWorkloadFactory& workloadFactory,
5009 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005010{
5011 constexpr unsigned int width = 2;
5012 constexpr unsigned int height = 3;
5013
5014 const armnn::TensorInfo tensorInfo({height, width },
5015 armnn::DataType::Float32);
5016 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5017 -10.0f, -5.0f,
5018 0.0f, 5.0f,
5019 10.0f, 10.0f
5020 }));
5021
5022 LayerTestResult<float, 2> ret(tensorInfo);
5023
5024 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5025
5026 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5027
5028 armnn::FakeQuantizationQueueDescriptor data;
5029 armnn::WorkloadInfo info;
5030
5031 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
5032 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
5033 float min = -10.f;
5034 float max = 10.f;
5035
5036 data.m_Parameters.m_Min = min;
5037 data.m_Parameters.m_Max = max;
5038
5039 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
5040 armnn::FakeQuantizationQueueDescriptor refData = data;
5041 armnn::WorkloadInfo refInfo = info;
5042 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
5043
5044 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
5045
5046 inputHandle->Allocate();
5047 outputHandle->Allocate();
5048
5049 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
5050
5051 workload->Execute();
5052
5053 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
5054
5055 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5056 0.0f, 63.0f,
5057 128.0f, 191.0f,
5058 255.0f, 255.0f
5059 }));
5060 return ret;
5061}
5062
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005063namespace
5064{
5065
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005066LayerTestResult<float, 4> L2NormalizationTestImpl(
5067 armnn::IWorkloadFactory& workloadFactory,
5068 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5069 const armnn::TensorShape& inputOutputTensorShape,
5070 const std::vector<float>& inputValues,
5071 const std::vector<float>& expectedOutputValues,
Matthew Bentham8800c002018-11-19 13:19:28 +00005072 const armnn::DataLayout layout)
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005073{
5074 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5075 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5076
jimfly013aab7c32018-11-12 13:32:08 +00005077 // at this point if we require it permute the input data
5078 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
5079 std::vector<float> inputData = inputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005080 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005081 {
5082 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005083 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005084 inputData = tmp;
5085 }
5086
5087 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005088
5089 LayerTestResult<float, 4> result(outputTensorInfo);
jimfly013aab7c32018-11-12 13:32:08 +00005090 std::vector<float> expectedOutputData = expectedOutputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005091 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005092 {
5093 std::vector<float> tmp(expectedOutputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005094 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
5095 expectedOutputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005096 expectedOutputData = tmp;
5097 }
5098 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005099
5100 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5101 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5102
5103 armnn::L2NormalizationQueueDescriptor descriptor;
Matthew Bentham8800c002018-11-19 13:19:28 +00005104 descriptor.m_Parameters.m_DataLayout = layout;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005105 armnn::WorkloadInfo info;
5106
5107 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5108 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5109
5110 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
5111
5112 inputHandle->Allocate();
5113 outputHandle->Allocate();
5114
5115 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
5116
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005117 ExecuteWorkload(*workload, memoryManager);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005118
5119 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5120
5121 return result;
5122}
5123
5124float CalcInvL2Norm(std::initializer_list<float> elements)
5125{
5126 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
5127 [](float acc, float element) { return acc + element * element; });
5128 return 1.0f / sqrtf(reduction);
5129}
5130
5131} // anonymous namespace
5132
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005133template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005134LayerTestResult<T, 2> Pad2dTestCommon(
5135 armnn::IWorkloadFactory& workloadFactory,
5136 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5137 float qScale,
5138 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005139{
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005140 const armnn::TensorShape inputShape{ 3, 3 };
5141 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005142
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005143 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5144 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005145
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005146 std::vector<T> inputValues(
5147 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005148 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005149 // Height (3) x Width (3)
5150 4, 8, 6,
5151 7, 4, 4,
5152 3, 2, 4
5153 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005154
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005155 std::vector<T> expectedOutputValues(
5156 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005157 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005158 0, 0, 0, 0, 0, 0, 0,
5159 0, 0, 0, 0, 0, 0, 0,
5160 0, 0, 4, 8, 6, 0, 0,
5161 0, 0, 7, 4, 4, 0, 0,
5162 0, 0, 3, 2, 4, 0, 0,
5163 0, 0, 0, 0, 0, 0, 0,
5164 0, 0, 0, 0, 0, 0, 0
5165 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005166
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005167 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005168
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005169 LayerTestResult<T, 2> result(outputTensorInfo);
5170 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005171
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005172 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5173 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005174
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005175 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005176
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005177 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5178 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5179 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005180
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005181 descriptor.m_Parameters.m_PadList = PadList;
5182 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005183
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005184 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5185 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005186
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005187 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005188
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005189 inputHandle->Allocate();
5190 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005191
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005192 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005193
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005194 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005195
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005196 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005197
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005198 return result;
5199}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005200
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005201template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005202LayerTestResult<T, 3> Pad3dTestCommon(
5203 armnn::IWorkloadFactory& workloadFactory,
5204 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5205 float qScale,
5206 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005207{
5208 const armnn::TensorShape inputShape{ 2, 2, 2 };
5209 const armnn::TensorShape outputShape{ 3, 5, 6 };
5210
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005211 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5212 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005213
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005214 std::vector<T> inputValues(
5215 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005216 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005217 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005218 0, 4,
5219 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005220
5221 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005222 6, 1,
5223 5, 2
5224 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005225
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005226 std::vector<T> expectedOutputValues(
5227 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005228 {
5229
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005230 0, 0, 0, 0, 0, 0,
5231 0, 0, 0, 0, 0, 0,
5232 0, 0, 0, 4, 0, 0,
5233 0, 0, 2, 5, 0, 0,
5234 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005235
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005236 0, 0, 0, 0, 0, 0,
5237 0, 0, 0, 0, 0, 0,
5238 0, 0, 6, 1, 0, 0,
5239 0, 0, 5, 2, 0, 0,
5240 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005241
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005242 0, 0, 0, 0, 0, 0,
5243 0, 0, 0, 0, 0, 0,
5244 0, 0, 0, 0, 0, 0,
5245 0, 0, 0, 0, 0, 0,
5246 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005247
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005248 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005249
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005250 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005251
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005252 LayerTestResult<T, 3> result(outputTensorInfo);
5253 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005254
5255 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5256 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5257
5258 armnn::PadQueueDescriptor descriptor;
5259
5260 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5261 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
5262 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5263 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5264
5265 descriptor.m_Parameters.m_PadList = PadList;
5266 armnn::WorkloadInfo info;
5267
5268 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5269 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5270
5271 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5272
5273 inputHandle->Allocate();
5274 outputHandle->Allocate();
5275
5276 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
5277
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005278 workload->Execute();
5279
5280 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
5281
5282 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005283}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005284
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005285template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005286LayerTestResult<T, 4> Pad4dTestCommon(
5287 armnn::IWorkloadFactory& workloadFactory,
5288 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5289 float qScale,
5290 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005291{
5292 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
5293 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
5294
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005295 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5296 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005297
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005298 std::vector<T> inputValues(
5299 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005300 {
5301 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005302 0, 1,
5303 2, 3,
5304 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005305
5306 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005307 6, 7,
5308 8, 9,
5309 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005310
5311 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005312 12, 13,
5313 14, 15,
5314 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005315
5316 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005317 18, 19,
5318 20, 21,
5319 22, 23
5320 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005321
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005322 std::vector<T> expectedOutputValues(
5323 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005324 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005325 0, 0, 0, 0,
5326 0, 0, 0, 0,
5327 0, 0, 0, 0,
5328 0, 0, 0, 0,
5329 0, 0, 0, 0,
5330 0, 0, 0, 0,
5331 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005332
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005333 0, 0, 0, 0,
5334 0, 0, 0, 0,
5335 0, 0, 0, 0,
5336 0, 0, 0, 0,
5337 0, 0, 0, 0,
5338 0, 0, 0, 0,
5339 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005340
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005341 0, 0, 0, 0,
5342 0, 0, 0, 0,
5343 0, 0, 0, 0,
5344 0, 0, 0, 0,
5345 0, 0, 0, 0,
5346 0, 0, 0, 0,
5347 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005348
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005349 0, 0, 0, 0,
5350 0, 0, 0, 0,
5351 0, 0, 0, 0,
5352 0, 0, 0, 0,
5353 0, 0, 0, 0,
5354 0, 0, 0, 0,
5355 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005356
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005357 0, 0, 0, 0,
5358 0, 0, 0, 0,
5359 0, 0, 0, 0,
5360 0, 0, 0, 0,
5361 0, 0, 0, 0,
5362 0, 0, 0, 0,
5363 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005364
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005365 0, 0, 0, 0,
5366 0, 0, 0, 0,
5367 0, 0, 0, 0,
5368 0, 0, 0, 0,
5369 0, 0, 0, 0,
5370 0, 0, 0, 0,
5371 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005372
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005373 0, 0, 0, 0,
5374 0, 0, 0, 0,
5375 0, 0, 0, 0,
5376 0, 0, 0, 0,
5377 0, 0, 0, 0,
5378 0, 0, 0, 0,
5379 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005380
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005381 0, 0, 0, 0,
5382 0, 0, 0, 0,
5383 0, 0, 0, 0,
5384 0, 0, 1, 0,
5385 0, 2, 3, 0,
5386 0, 4, 5, 0,
5387 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005388
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005389 0, 0, 0, 0,
5390 0, 0, 0, 0,
5391 0, 0, 0, 0,
5392 0, 6, 7, 0,
5393 0, 8, 9, 0,
5394 0, 10, 11, 0,
5395 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005396
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005397 0, 0, 0, 0,
5398 0, 0, 0, 0,
5399 0, 0, 0, 0,
5400 0, 0, 0, 0,
5401 0, 0, 0, 0,
5402 0, 0, 0, 0,
5403 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005404
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005405 0, 0, 0, 0,
5406 0, 0, 0, 0,
5407 0, 0, 0, 0,
5408 0, 0, 0, 0,
5409 0, 0, 0, 0,
5410 0, 0, 0, 0,
5411 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005412
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005413 0, 0, 0, 0,
5414 0, 0, 0, 0,
5415 0, 0, 0, 0,
5416 0, 0, 0, 0,
5417 0, 0, 0, 0,
5418 0, 0, 0, 0,
5419 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005420
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005421 0, 0, 0, 0,
5422 0, 0, 0, 0,
5423 0, 0, 0, 0,
5424 0, 12, 13, 0,
5425 0, 14, 15, 0,
5426 0, 16, 17, 0,
5427 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005428
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005429 0, 0, 0, 0,
5430 0, 0, 0, 0,
5431 0, 0, 0, 0,
5432 0, 18, 19, 0,
5433 0, 20, 21, 0,
5434 0, 22, 23, 0,
5435 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005436
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005437 0, 0, 0, 0,
5438 0, 0, 0, 0,
5439 0, 0, 0, 0,
5440 0, 0, 0, 0,
5441 0, 0, 0, 0,
5442 0, 0, 0, 0,
5443 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005444
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005445 0, 0, 0, 0,
5446 0, 0, 0, 0,
5447 0, 0, 0, 0,
5448 0, 0, 0, 0,
5449 0, 0, 0, 0,
5450 0, 0, 0, 0,
5451 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005452
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005453 0, 0, 0, 0,
5454 0, 0, 0, 0,
5455 0, 0, 0, 0,
5456 0, 0, 0, 0,
5457 0, 0, 0, 0,
5458 0, 0, 0, 0,
5459 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005460
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005461 0, 0, 0, 0,
5462 0, 0, 0, 0,
5463 0, 0, 0, 0,
5464 0, 0, 0, 0,
5465 0, 0, 0, 0,
5466 0, 0, 0, 0,
5467 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005468
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005469 0, 0, 0, 0,
5470 0, 0, 0, 0,
5471 0, 0, 0, 0,
5472 0, 0, 0, 0,
5473 0, 0, 0, 0,
5474 0, 0, 0, 0,
5475 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005476
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005477 0, 0, 0, 0,
5478 0, 0, 0, 0,
5479 0, 0, 0, 0,
5480 0, 0, 0, 0,
5481 0, 0, 0, 0,
5482 0, 0, 0, 0,
5483 0, 0, 0, 0
5484 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005485
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005486 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005487
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005488 LayerTestResult<T, 4> result(outputTensorInfo);
5489 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005490
5491 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5492 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5493
5494 armnn::PadQueueDescriptor descriptor;
5495
5496 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5497 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
5498 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5499 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
5500 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
5501
5502 descriptor.m_Parameters.m_PadList = PadList;
5503 armnn::WorkloadInfo info;
5504
5505 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5506 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5507
5508 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5509
5510 inputHandle->Allocate();
5511 outputHandle->Allocate();
5512
5513 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
5514
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005515 workload->Execute();
5516
5517 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5518
5519 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005520}
5521
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005522LayerTestResult<uint8_t, 2> PadUint82dTest(
5523 armnn::IWorkloadFactory& workloadFactory,
5524 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005525{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005526 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005527}
5528
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005529LayerTestResult<uint8_t, 3> PadUint83dTest(
5530 armnn::IWorkloadFactory& workloadFactory,
5531 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005532{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005533 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005534}
5535
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005536LayerTestResult<uint8_t, 4> PadUint84dTest(
5537 armnn::IWorkloadFactory& workloadFactory,
5538 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005539{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005540 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005541}
5542
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005543LayerTestResult<float, 2> PadFloat322dTest(
5544 armnn::IWorkloadFactory& workloadFactory,
5545 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005546{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005547 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005548}
5549
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005550LayerTestResult<float, 3> PadFloat323dTest(
5551 armnn::IWorkloadFactory& workloadFactory,
5552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005553{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005554 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005555}
5556
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005557LayerTestResult<float, 4> PadFloat324dTest(
5558 armnn::IWorkloadFactory& workloadFactory,
5559 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005560{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005561 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005562}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005563
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005564LayerTestResult<float, 4> L2Normalization1dTest(
5565 armnn::IWorkloadFactory& workloadFactory,
5566 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005567 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005568{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005569 // Width: 1
5570 // Height: 1
5571 // Channels: 10
5572 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005573 unsigned int numberOfBatches = 1;
5574 unsigned int numberOfChannels = 10;
5575 unsigned int height = 1;
5576 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00005577
jimfly013aab7c32018-11-12 13:32:08 +00005578
Nina Drozdd41b2592018-11-19 13:03:36 +00005579 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005580 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005581 std::vector<float> inputValues
5582 {
5583 // Batch 0, Channel 0, Height (1) x Width (1)
5584 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00005585
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005586 // Batch 0, Channel 1, Height (1) x Width (1)
5587 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00005588
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005589 // Batch 0, Channel 2, Height (1) x Width (1)
5590 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00005591
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005592 // Batch 0, Channel 3, Height (1) x Width (1)
5593 4.0f,
5594
5595 // Batch 0, Channel 4, Height (1) x Width (1)
5596 5.0f,
5597
5598 // Batch 0, Channel 5, Height (1) x Width (1)
5599 6.0f,
5600
5601 // Batch 0, Channel 6, Height (1) x Width (1)
5602 7.0f,
5603
5604 // Batch 0, Channel 7, Height (1) x Width (1)
5605 8.0f,
5606
5607 // Batch 0, Channel 8, Height (1) x Width (1)
5608 9.0f,
5609
5610 // Batch 0, Channel 9, Height (1) x Width (1)
5611 10.0f
5612 };
telsoa014fcda012018-03-09 14:13:49 +00005613 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005614 std::vector<float> expectedOutputValues
5615 {
5616 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00005617 1.0f * approxInvL2Norm,
5618 2.0f * approxInvL2Norm,
5619 3.0f * approxInvL2Norm,
5620 4.0f * approxInvL2Norm,
5621 5.0f * approxInvL2Norm,
5622 6.0f * approxInvL2Norm,
5623 7.0f * approxInvL2Norm,
5624 8.0f * approxInvL2Norm,
5625 9.0f * approxInvL2Norm,
5626 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005627 };
telsoa014fcda012018-03-09 14:13:49 +00005628
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005629
5630 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005631 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00005632}
5633
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005634LayerTestResult<float, 4> L2Normalization2dTest(
5635 armnn::IWorkloadFactory& workloadFactory,
5636 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005637 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005638{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005639 // Width: 5
5640 // Height: 1
5641 // Channels: 2
5642 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005643 unsigned int numberOfBatches = 1;
5644 unsigned int numberOfChannels = 2;
5645 unsigned int height = 1;
5646 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00005647
Nina Drozdd41b2592018-11-19 13:03:36 +00005648 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005649 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005650 std::vector<float> inputValues
5651 {
5652 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00005653 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00005654
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005655 // Batch 0, Channel 1, Height (1) x Width (5)
5656 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
5657 };
5658 std::vector<float> expectedOutputValues
5659 {
5660 // Batch 0, Channel 0, Height (1) x Width (5)
5661 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
5662 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
5663 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
5664 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00005665 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
5666
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005667 // Batch 0, Channel 1, Height (1) x Width (5)
5668 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
5669 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
5670 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
5671 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00005672 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005673 };
telsoa014fcda012018-03-09 14:13:49 +00005674
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005675 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005676 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005677}
telsoa014fcda012018-03-09 14:13:49 +00005678
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005679LayerTestResult<float, 4> L2Normalization3dTest(
5680 armnn::IWorkloadFactory& workloadFactory,
5681 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005682 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005683{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005684 // Width: 3
5685 // Height: 4
5686 // Channels: 2
5687 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005688 unsigned int numberOfBatches = 1;
5689 unsigned int numberOfChannels = 2;
5690 unsigned int height = 4;
5691 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00005692
Nina Drozdd41b2592018-11-19 13:03:36 +00005693 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005694 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005695 std::vector<float> inputValues
5696 {
5697 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005698 119.0f, 21.0f, 150.0f,
5699 149.0f, 32.0f, 179.0f,
5700 15.0f, 227.0f, 141.0f,
5701 147.0f, 199.0f, 220.0f,
5702
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005703 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005704 110.0f, 140.0f, 73.0f,
5705 211.0f, 212.0f, 89.0f,
5706 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005707 162.0f, 12.0f, 161.0f
5708 };
5709 std::vector<float> expectedOutputValues
5710 {
5711 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005712 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
5713 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
5714 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
5715 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
5716 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
5717 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
5718 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
5719 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
5720 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
5721 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
5722 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
5723 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
5724
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005725 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005726 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
5727 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
5728 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
5729 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
5730 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
5731 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
5732 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
5733 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
5734 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
5735 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
5736 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005737 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
5738 };
telsoa014fcda012018-03-09 14:13:49 +00005739
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005740 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005741 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005742}
telsoa014fcda012018-03-09 14:13:49 +00005743
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005744LayerTestResult<float, 4> L2Normalization4dTest(
5745 armnn::IWorkloadFactory& workloadFactory,
5746 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005747 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005748{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005749 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005750 // Height: 4
5751 // Channels: 3
5752 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00005753 unsigned int numberOfBatches = 2;
5754 unsigned int numberOfChannels = 3;
5755 unsigned int height = 4;
5756 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00005757
Nina Drozdd41b2592018-11-19 13:03:36 +00005758 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005759 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005760 std::vector<float> inputValues
5761 {
5762 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005763 235.0f, 46.0f, 178.0f,
5764 100.0f, 123.0f, 19.0f,
5765 172.0f, 74.0f, 250.0f,
5766 6.0f, 195.0f, 80.0f,
5767
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005768 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005769 113.0f, 95.0f, 202.0f,
5770 77.0f, 114.0f, 71.0f,
5771 122.0f, 246.0f, 166.0f,
5772 82.0f, 28.0f, 37.0f,
5773
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005774 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005775 56.0f, 170.0f, 162.0f,
5776 194.0f, 89.0f, 254.0f,
5777 12.0f, 209.0f, 200.0f,
5778 1.0f, 64.0f, 54.0f,
5779
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005780 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005781 67.0f, 90.0f, 49.0f,
5782 7.0f, 163.0f, 18.0f,
5783 25.0f, 117.0f, 103.0f,
5784 247.0f, 59.0f, 189.0f,
5785
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005786 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005787 239.0f, 104.0f, 199.0f,
5788 17.0f, 124.0f, 153.0f,
5789 222.0f, 217.0f, 75.0f,
5790 32.0f, 126.0f, 21.0f,
5791
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005792 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005793 97.0f, 145.0f, 215.0f,
5794 115.0f, 116.0f, 238.0f,
5795 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005796 92.0f, 125.0f, 88.0f
5797 };
5798 std::vector<float> expectedOutputValues
5799 {
5800 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005801 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
5802 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
5803 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
5804 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
5805 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
5806 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
5807 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
5808 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
5809 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
5810 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
5811 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
5812 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
5813
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005814 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005815 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
5816 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
5817 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
5818 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
5819 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
5820 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
5821 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
5822 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
5823 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
5824 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
5825 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
5826 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
5827
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005828 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005829 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
5830 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
5831 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
5832 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
5833 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
5834 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
5835 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
5836 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
5837 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
5838 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
5839 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
5840 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
5841
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005842 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005843 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
5844 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
5845 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
5846 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
5847 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
5848 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
5849 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
5850 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
5851 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
5852 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
5853 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
5854 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
5855
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005856 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005857 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
5858 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
5859 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
5860 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
5861 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
5862 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
5863 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
5864 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
5865 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
5866 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
5867 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
5868 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
5869
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005870 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005871 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
5872 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
5873 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
5874 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
5875 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
5876 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
5877 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
5878 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
5879 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
5880 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
5881 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005882 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
5883 };
telsoa014fcda012018-03-09 14:13:49 +00005884
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005885 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005886 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00005887}
5888
// Verifies a Constant layer workload: the layer owns a fixed tensor and its
// workload must reproduce that tensor, unchanged, on the output handle.
// qScale/qOffset are applied only when ArmnnType is a quantized type.
// NOTE(review): memoryManager is currently unused by this implementation.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    // A Constant layer copies its stored tensor verbatim, so the output
    // dimensions mirror the input dimensions exactly.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // NCHW tensor layout: { batch, channels, height, width }.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    LayerTestResult<T, 4> result(outputTensorInfo);
    // Identity expectation: the workload output must equal the constant tensor.
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The constant data is supplied through a CPU tensor handle referenced by
    // the descriptor; it must stay alive until the workload has executed.
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    // A Constant workload has no inputs - only the output is registered.
    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workload->Execute();

    // Pull the device result back so the caller can compare it with outputExpected.
    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5981
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005982LayerTestResult<float, 4> ConstantTest(
5983 armnn::IWorkloadFactory& workloadFactory,
5984 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005985{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005986 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005987}
5988
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005989LayerTestResult<uint8_t, 4> ConstantTestUint8(
5990 armnn::IWorkloadFactory& workloadFactory,
5991 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005992{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005993 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005994}
5995
// Concatenates two QAsymm8 tensors along the channel dimension (dim 0 of a CHW
// layout) where the inputs carry *different* quantization parameters. The output
// shares input1's parameters, so input1's bytes pass through unchanged, while
// input2 must be requantized from (inputScale2, inputOffset2) into the output
// space — the last channel of the expected data encodes that requantization.
// Note: memoryManager is part of the common test signature but is not used here.
LayerTestResult<uint8_t, 3> MergerUint8DifferentQParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Quantized input1 tensor. Range [-3, 1]
    const float inputScale1 = 0.015686f;
    const int32_t inputOffset1 = 192;

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Quantized input2 tensor. Range [-1, 4]
    const float inputScale2 = 0.019608f;
    const int32_t inputOffset2 = 50;

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // Output has the same quantization parameters than input1,
    // so that only the requantization of input2 is required
    const float outputScale = 0.015686f;
    const int32_t outputOffset = 192;

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Channels 0-1: input1 copied verbatim (same quantization as the output).
    // Channel 2: input2's values after requantization into the output space.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        176, 177, 178,
        179, 181, 182,
        183, 184, 186,
        187, 188, 189,
        191, 192, 193,
        195, 196, 197,
    })
    );

    outputTensorInfo.SetQuantizationScale(outputScale);
    outputTensorInfo.SetQuantizationOffset(outputOffset);
    inputTensorInfo1.SetQuantizationScale(inputScale1);
    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
    inputTensorInfo2.SetQuantizationScale(inputScale2);
    inputTensorInfo2.SetQuantizationOffset(inputOffset2);

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // If the backend supports sub-tensors, the inputs are created as views into
    // the output tensor at the merge origins; otherwise standalone handles are used.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6137
// Concatenates two QAsymm8 tensors (2 channels + 1 channel) along the channel
// dimension into a 3-channel output. All tensors share the same quantization
// parameters, so the merger is a pure byte copy and the expected output is
// simply the two inputs laid back-to-back.
// Note: memoryManager is part of the common test signature but is not used here.
LayerTestResult<uint8_t, 3> MergerUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected result: input1's two channels followed by input2's single channel.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // If the backend supports sub-tensors, the inputs are created as views into
    // the output tensor at the merge origins; otherwise standalone handles are used.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6272
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006273LayerTestResult<uint8_t, 4> AdditionUint8Test(
6274 armnn::IWorkloadFactory& workloadFactory,
6275 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006276{
6277 unsigned int batchSize = 1;
6278 unsigned int channels = 2;
6279 unsigned int height = 2;
6280 unsigned int width = 3;
6281
6282 const float scale = 7.0f;
6283 const int32_t offset = 3;
6284
6285 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
6286 armnn::TensorInfo outputTensorInfo;
6287
6288 const unsigned int shape[] = { batchSize, channels, height, width };
6289 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
6290 inputTensorInfo1.SetQuantizationScale(scale);
6291 inputTensorInfo1.SetQuantizationOffset(offset);
6292
6293 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
6294 inputTensorInfo2.SetQuantizationScale(scale);
6295 inputTensorInfo2.SetQuantizationOffset(offset);
6296
6297 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
6298 outputTensorInfo.SetQuantizationScale(scale);
6299 outputTensorInfo.SetQuantizationOffset(offset);
6300
telsoa01c577f2c2018-08-31 09:22:23 +01006301 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00006302 auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
6303 {
6304 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
6305 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
6306 }));
6307
telsoa01c577f2c2018-08-31 09:22:23 +01006308 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00006309 auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
6310 {
6311 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
6312 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
6313 }));
6314
telsoa01c577f2c2018-08-31 09:22:23 +01006315 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00006316 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
6317 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
6318 {
6319 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
6320 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
6321 }));
6322
6323 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
6324 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
6325 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6326
6327 armnn::AdditionQueueDescriptor data;
6328 armnn::WorkloadInfo info;
6329 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
6330 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
6331 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
6332
6333 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
6334
6335 inputHandle1->Allocate();
6336 inputHandle2->Allocate();
6337 outputHandle->Allocate();
6338
6339 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
6340 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
6341
6342 workload->Execute();
6343
6344 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6345
6346 return result;
6347}
6348
namespace
{
// Shared driver for the quantized multiplication tests: builds two QAsymm8
// input tensors and an expected-output tensor from the supplied raw values and
// quantization parameters, runs a Multiplication workload, and returns the
// actual output alongside the expected data for the caller to compare.
// Note: memoryManager is part of the common test signature but is not used here.
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<uint8_t> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<uint8_t> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<uint8_t> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Handles must be allocated before data can be copied in and the workload run.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
6412
// Element-wise multiplication of two same-shaped QAsymm8 tensors with distinct
// quantization parameters. The output scale is deliberately chosen so several
// products fall outside the representable range and must clamp to 0 or 255.
LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;
    const unsigned int shape[] = { batchSize, channels, height, width };

    // See dequantized values to the right.
    std::vector<uint8_t> input0({
        62,  37,   3, 172,  13, 111, // 244, 144,   8, 684,  48, 440,
        188,  20,  73,  31,  23,  31 // 748,  76, 288, 120,  88, 120
    });

    // See dequantized values to the right.
    std::vector<uint8_t> input1({
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
        48,  115, 151,  79,  78,  97 // 150, 351, 459, 243, 240, 297
    });

    // See dequantized values to the right.
    std::vector<uint8_t> output(
    {
        64,  72,  0, 255,  8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
        77,  15, 92,  16, 10,  21, // 112200,  26676,        132192,           29160, 21120,  35640
    });

    // input0: scale 4, offset 1; input1: scale 3, offset -2.
    return MultiplicationUint8TestHelper(workloadFactory,
                                         memoryManager,
                                         shape,
                                         input0,
                                         4.0f,
                                         1,
                                         shape,
                                         input1,
                                         3.0f,
                                         -2,
                                         shape,
                                         output,
                                         1366.255f, // Scale/offset chosen to have output values out of range.
                                         -5);
}
6457
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006458LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
6459 armnn::IWorkloadFactory& workloadFactory,
6460 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006461{
6462 const unsigned int shape0[] = { 1, 2, 2, 3 };
6463 const unsigned int shape1[] = { 1, 1, 1, 1 };
6464
6465 std::vector<uint8_t> input0({
6466 1, 2, 3, 4, 5, 6,
6467 7, 8, 9, 10, 11, 12
6468 });
6469
6470 std::vector<uint8_t> input1({2});
6471
6472 std::vector<uint8_t> output({
6473 2, 4, 6, 8, 10, 12,
6474 14, 16, 18, 20, 22, 24
6475 });
6476
6477 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006478 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01006479 shape0,
6480 input0,
6481 1.0f,
6482 0,
6483 shape1,
6484 input1,
6485 1.0f,
6486 0,
6487 shape0,
6488 output,
6489 1.0f,
6490 0);
6491}
6492
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006493LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
6494 armnn::IWorkloadFactory& workloadFactory,
6495 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006496{
6497 const unsigned int shape0[] = { 1, 2, 2, 3 };
6498 const unsigned int shape1[] = { 1, 1, 1, 3 };
6499
6500 std::vector<uint8_t> input0({
6501 1, 2, 3, 4, 5, 6,
6502 7, 8, 9, 10, 11, 12
6503 });
6504
6505 std::vector<uint8_t> input1({1, 2, 3});
6506
6507 std::vector<uint8_t> output({
6508 1, 4, 9, 4, 10, 18,
6509 7, 16, 27, 10, 22, 36
6510 });
6511
6512 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006513 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01006514 shape0,
6515 input0,
6516 1.0f,
6517 0,
6518 shape1,
6519 input1,
6520 1.0f,
6521 0,
6522 shape0,
6523 output,
6524 1.0f,
6525 0);
6526}
telsoa014fcda012018-03-09 14:13:49 +00006527
namespace
{
// Shared driver for the subtraction tests. T selects the data type: uint8_t
// maps to QuantisedAsymm8, anything else to Float32. Builds the two inputs and
// the expected output from raw values plus quantization parameters, runs a
// Subtraction workload, and returns actual + expected results for comparison.
// Note: memoryManager is part of the common test signature but is not used here.
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Derive the armnn data type from the element type of the test data.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    // Handles must be allocated before data can be copied in and the workload run.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
6596
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006597LayerTestResult<uint8_t, 4> SubtractionUint8Test(
6598 armnn::IWorkloadFactory& workloadFactory,
6599 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006600{
6601 const unsigned int shape0[] = { 1, 1, 2, 2 };
6602 const unsigned int shape1[] = { 1, 1, 2, 2 };
6603
6604 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
6605 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
6606 std::vector<uint8_t> output({ 3, 3, 5, 5 });
6607
6608 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006609 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006610 shape0, input0, 0.5f, 2,
6611 shape1, input1, 1.0f, 0,
6612 shape0, output, 1.0f, 0);
6613}
6614
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006615LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
6616 armnn::IWorkloadFactory& workloadFactory,
6617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006618{
6619 const unsigned int shape0[] = { 1, 1, 2, 2 };
6620 const unsigned int shape1[] = { 1, 1, 1, 1 };
6621
6622 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
6623 std::vector<uint8_t> input1({ 2 });
6624 std::vector<uint8_t> output({ 5, 6, 7, 8 });
6625
6626 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006627 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006628 shape0, input0, 0.5f, 2,
6629 shape1, input1, 1.0f, 0,
6630 shape0, output, 1.0f, 3);
6631}
6632
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006633LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
6634 armnn::IWorkloadFactory& workloadFactory,
6635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006636{
6637 const unsigned int shape0[] = { 1, 1, 2, 2 };
6638 const unsigned int shape1[] = { 1, 1, 2, 1 };
6639
6640 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
6641 std::vector<uint8_t> input1({ 2, 1 });
6642 std::vector<uint8_t> output({ 8, 11, 12, 15 });
6643
6644 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006645 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006646 shape0, input0, 1.0f, 0,
6647 shape1, input1, 1.0f, 0,
6648 shape0, output, 1.0f, 0);
6649}
6650
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006651LayerTestResult<float, 4> SubtractionTest(
6652 armnn::IWorkloadFactory& workloadFactory,
6653 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006654{
6655 const unsigned int shape0[] = { 1, 1, 2, 2 };
6656 const unsigned int shape1[] = { 1, 1, 2, 2 };
6657
6658 std::vector<float> input0({ 1, 2, 3, 4 });
6659 std::vector<float> input1({ 1, -1, 0, 2 });
6660 std::vector<float> output({ 0, 3, 3, 2 });
6661
6662 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006663 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006664 shape0, input0, 1.0f, 0,
6665 shape1, input1, 1.0f, 0,
6666 shape0, output, 1.0f, 0);
6667}
6668
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006669LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
6670 armnn::IWorkloadFactory& workloadFactory,
6671 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006672{
6673 const unsigned int shape0[] = { 1, 1, 2, 2 };
6674 const unsigned int shape1[] = { 1, 1, 1, 1 };
6675
6676 std::vector<float> input0({ 1, 2, 3, 4 });
6677 std::vector<float> input1({ 10 });
6678 std::vector<float> output({ -9, -8, -7, -6 });
6679
6680 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006681 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006682 shape0, input0, 1.0f, 0,
6683 shape1, input1, 1.0f, 0,
6684 shape0, output, 1.0f, 0);
6685}
6686
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006687LayerTestResult<float, 4> SubtractionBroadcastTest(
6688 armnn::IWorkloadFactory& workloadFactory,
6689 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006690{
6691 const unsigned int shape0[] = { 1, 1, 2, 2 };
6692 const unsigned int shape1[] = { 1, 1, 1, 2 };
6693
6694 std::vector<float> input0({ 1, 2, 3, 4 });
6695 std::vector<float> input1({ 10, -5 });
6696 std::vector<float> output({ -9, 7, -7, 9 });
6697
6698 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006699 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006700 shape0, input0, 1.0f, 0,
6701 shape1, input1, 1.0f, 0,
6702 shape0, output, 1.0f, 0);
6703}
6704
// "Nop" resize: the output dimensions equal the input dimensions, so bilinear
// resize must pass the input through unchanged — the expected output is the
// input tensor itself.
// Note: memoryManager is part of the common test signature but is not used here.
LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Input and output share the same (arbitrary) quantization parameters,
    // so no requantization takes place.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input; // Identity resize: expect the input back.

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6758
// Downscales a 2x2 uint8 tensor to 1x1. Because the implementation projects
// the top-left corner of each output texel (not its centre), the single
// output value is the input's (0,0) element rather than an average.
LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Allocate backend memory before copying data in and executing.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6817
// Downscales a 4x4 uint8 tensor to 2x2 (uniform halving in both axes).
// With top-left-corner projection the expected output is the input
// sampled at every second row and column — see the expected {1,3,3,5}
// which are input elements (0,0), (0,2), (2,0), (2,2).
LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Allocate backend memory before copying data in and executing.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6874
// Downscales a 3x2 uint8 tensor to 2x1 with a non-integer ratio, so real
// bilinear interpolation happens (the inline comments show the dequantised
// float values for each element).
LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Allocate backend memory before copying data in and executing.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6929
// Magnifies a 3x2 uint8 tensor to 3x5 (width 2 -> 5, height unchanged).
// Input and output use DIFFERENT quantisation parameters, so the workload
// must dequantise, interpolate in float, then requantise (the inline
// comments give the dequantised float value of each element).
LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228, // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71 // 2.400595, 0.68896
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Allocate backend memory before copying data in and executing.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6986
// Shared driver for the rank-2 Rsqrt tests: builds an Rsqrt workload via
// workloadFactory, feeds it inputValues and returns actual vs expected
// outputs for the caller's framework to compare.
// NOTE(review): the TensorInfo and vector parameters are taken by value,
// copying on every call; const& would avoid that, but confirm no other
// declaration of this helper exists before changing the signature.
LayerTestResult<float, 2> Rsqrt2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo inputTensorInfo,
    const armnn::TensorInfo outputTensorInfo,
    std::vector<float> inputValues,
    std::vector<float> expectedOutputValues)
{
    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, std::vector<float>(inputValues));

    LayerTestResult<float, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::RsqrtQueueDescriptor descriptor;

    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);

    // Allocate backend memory before copying data in and executing.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
7023LayerTestResult<float, 2> Rsqrt2dTest(
7024 armnn::IWorkloadFactory& workloadFactory,
7025 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7026{
7027 const armnn::TensorShape inputShape{ 2, 2 };
7028 const armnn::TensorShape outputShape{ 2, 2 };
7029
7030 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7031 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7032
7033 std::vector<float> inputValues
7034 {
7035 1.f, 4.f,
7036 16.f, 25.f
7037 };
7038
7039 std::vector<float> expectedOutputValues
7040 {
7041 1.f, 0.5f,
7042 0.25f, 0.2f
7043 };
7044
7045 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7046 inputTensorInfo, outputTensorInfo,
7047 inputValues, expectedOutputValues);
7048}
7049
// Rank-3 (3x1x2) reciprocal-square-root test. The workload plumbing is
// duplicated inline because Rsqrt2dTestCommon is hard-wired to rank 2.
LayerTestResult<float, 3> Rsqrt3dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const armnn::TensorShape inputShape{ 3, 1, 2 };
    const armnn::TensorShape outputShape{ 3, 1, 2 };

    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues
    {
        1.f, 4.f, 16.f,
        25.f, 64.f, 100.f
    };

    // Element-wise 1/sqrt of the inputs above.
    std::vector<float> expectedOutputValues
    {
        1.f, 0.5f, 0.25f,
        0.2f, 0.125f, 0.1f
    };

    auto inputTensor = MakeTensor<float, 3>(inputTensorInfo, std::vector<float>(inputValues));

    LayerTestResult<float, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float >(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::RsqrtQueueDescriptor descriptor;

    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);

    // Allocate backend memory before copying data in and executing.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
7100
7101LayerTestResult<float, 2> RsqrtZeroTest(
7102 armnn::IWorkloadFactory& workloadFactory,
7103 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7104{
7105 const armnn::TensorShape inputShape{ 1, 2 };
7106 const armnn::TensorShape outputShape{ 1, 2 };
7107
7108 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7109 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7110
7111 std::vector<float> inputValues
7112 {
7113 0.f, -0.f
7114 };
7115
7116 std::vector<float> expectedOutputValues
7117 {
7118 INFINITY, -INFINITY
7119 };
7120
7121 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7122 inputTensorInfo, outputTensorInfo,
7123 inputValues, expectedOutputValues);
7124}
7125
7126LayerTestResult<float, 2> RsqrtNegativeTest(
7127 armnn::IWorkloadFactory& workloadFactory,
7128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7129{
7130 const armnn::TensorShape inputShape{ 1, 2 };
7131 const armnn::TensorShape outputShape{ 1, 2 };
7132
7133 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7134 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7135
7136 std::vector<float> inputValues
7137 {
7138 -25.f, -16.f
7139 };
7140
7141 std::vector<float> expectedOutputValues
7142 {
7143 -NAN, -NAN
7144 };
7145
7146 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7147 inputTensorInfo, outputTensorInfo,
7148 inputValues, expectedOutputValues);
7149}
7150
// Float32 batch-normalisation test, NCHW layout, 1x2x3x2 tensor.
// The expected values are hard-coded against whatever mean/variance/
// beta/gamma BatchNormTestImpl uses internally — see that helper.
LayerTestResult<float, 4> BatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    // 0.f / 0 are the quantisation scale/offset arguments (unused for
    // Float32 — presumably ignored by the impl; confirm in BatchNormTestImpl).
    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        0.f, 0, armnn::DataLayout::NCHW);
}
7191
// Float32 batch-normalisation test with NHWC layout: same data as
// BatchNormTest but stored channels-last (1x3x2x2), exercising the
// layout-aware path of the workload.
LayerTestResult<float, 4> BatchNormNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    // Quantisation scale/offset (0.f, 0) are not meaningful for Float32.
    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        0.f, 0, armnn::DataLayout::NHWC);
}
7236
// Quantised (uint8) batch-normalisation test, NCHW layout, 1x2x3x2 tensor.
// Values are expressed in float; BatchNormTestImpl quantises them with
// scale 1/20 and offset 50.
LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        1.f/20.f, 50, armnn::DataLayout::NCHW);
}
7277
// Quantised (uint8) batch-normalisation test with NHWC layout (1x3x2x2):
// same data as BatchNormUint8Test, stored channels-last.
LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    // Quantisation: scale 1/20, offset 50 (matches BatchNormUint8Test).
    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
        (workloadFactory, memoryManager,
         inputOutputShape, inputValues, expectedOutputValues,
         1.f/20.f, 50, armnn::DataLayout::NHWC);
}
7322
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007323LayerTestResult<uint8_t, 4> ConstantUint8Test(
7324 armnn::IWorkloadFactory& workloadFactory,
7325 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007326{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007327 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00007328}
7329
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007330LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
7331 armnn::IWorkloadFactory& workloadFactory,
7332 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007333{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007334 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007335}
7336
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007337LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
7338 armnn::IWorkloadFactory& workloadFactory,
7339 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007340{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007341 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007342}
7343
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007344LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
7345 armnn::IWorkloadFactory& workloadFactory,
7346 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007347{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007348 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007349}
7350
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007351LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
7352 armnn::IWorkloadFactory& workloadFactory,
7353 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007354{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007355 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
7356 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007357}
7358
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007359LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
7360 armnn::IWorkloadFactory& workloadFactory,
7361 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007362{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007363 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
7364 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007365}
7366
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007367LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
7368 armnn::IWorkloadFactory& workloadFactory,
7369 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007370{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007371 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007372}
7373
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007374LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
7375 armnn::IWorkloadFactory& workloadFactory,
7376 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007377{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007378 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007379}
7380
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007381LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
7382 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00007383 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7384 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00007385{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007386 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
7387 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007388}
7389
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007390LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
7391 armnn::IWorkloadFactory& workloadFactory,
7392 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007393{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007394 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007395}
7396
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007397LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
7398 armnn::IWorkloadFactory& workloadFactory,
7399 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007401 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
7402 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007403}
7404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007405LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
7406 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00007407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7408 bool useSubtensor)
7409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007410 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
7411 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007412}
7413
7414LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
7415 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007416 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007417{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007418 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007419}
7420
7421LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
7422 armnn::IWorkloadFactory& workloadFactory,
7423 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7424{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007425 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007426}
7427
7428LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
7429 armnn::IWorkloadFactory& workloadFactory,
7430 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7431{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007432 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007433}
7434
7435LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
7436 armnn::IWorkloadFactory& workloadFactory,
7437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
7438{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007439 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
7440 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00007441}
7442
7443LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
7444 armnn::IWorkloadFactory& workloadFactory,
7445 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7446{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007447 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
7448 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007449}
7450
7451LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
7452 armnn::IWorkloadFactory& workloadFactory,
7453 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7454{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007455 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
7456 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007457}
7458
7459LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
7460 armnn::IWorkloadFactory& workloadFactory,
7461 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7462{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007463 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
7464 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007465}
7466
7467LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
7468 armnn::IWorkloadFactory& workloadFactory,
7469 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7470 bool useSubtensor)
7471{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007472 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
7473 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00007474}
7475
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007476LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
7477 armnn::IWorkloadFactory& workloadFactory,
7478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7479 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00007480{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007481 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
7482 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00007483}
7484
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007485LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
7486 armnn::IWorkloadFactory& workloadFactory,
7487 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7488 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00007489{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007490 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007491 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00007492}
7493
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007494LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
7495 armnn::IWorkloadFactory& workloadFactory,
7496 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7497 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00007498{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007499 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
7500 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00007501}
7502
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007503LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
7504 armnn::IWorkloadFactory& workloadFactory,
7505 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7506 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00007507{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007508 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007509 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00007510}
7511
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007512LayerTestResult<float, 4> SimpleMaxPooling2dTest(
7513 armnn::IWorkloadFactory& workloadFactory,
7514 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007515 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00007516{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007517 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00007518}
7519
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007520LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
7521 armnn::IWorkloadFactory& workloadFactory,
7522 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007523 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01007524{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007525 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01007526}
7527
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007528LayerTestResult<float, 4> SimpleAveragePooling2dTest(
7529 armnn::IWorkloadFactory& workloadFactory,
7530 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007531 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00007532{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007533 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01007534}
7535
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007536LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
7537 armnn::IWorkloadFactory& workloadFactory,
7538 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007539 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01007540{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007541 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007542 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00007543}
7544
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007545LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
7546 armnn::IWorkloadFactory& workloadFactory,
7547 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7548 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01007549{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007550 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007551 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01007552}
7553
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007554LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
7555 armnn::IWorkloadFactory& workloadFactory,
7556 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007557{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007558 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007559}
7560
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007561LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
7562 armnn::IWorkloadFactory& workloadFactory,
7563 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007564{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007565 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
7566 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00007567}
7568
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007569LayerTestResult<float, 4> SimpleL2Pooling2dTest(
7570 armnn::IWorkloadFactory& workloadFactory,
7571 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007572 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00007573{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007574 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00007575}
7576
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007577LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
7578 armnn::IWorkloadFactory& workloadFactory,
7579 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007580 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00007581{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007582 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00007583}
7584
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007585LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
7586 armnn::IWorkloadFactory& workloadFactory,
7587 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007588{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007589 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007590}
7591
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007592LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
7593 armnn::IWorkloadFactory& workloadFactory,
7594 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007595{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007596 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007597}
7598
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007599LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
7600 armnn::IWorkloadFactory& workloadFactory,
7601 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007602{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007603 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007604}
7605
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007606LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
7607 armnn::IWorkloadFactory& workloadFactory,
7608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007609{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007610 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007611}
7612
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007613LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
7614 armnn::IWorkloadFactory& workloadFactory,
7615 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007616{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007617 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007618}
7619
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007620LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
7621 armnn::IWorkloadFactory& workloadFactory,
7622 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007623{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007624 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007625}
7626
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007627LayerTestResult<float, 4> L2Pooling2dSize7Test(
7628 armnn::IWorkloadFactory& workloadFactory,
7629 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007630{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007631 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007632}
7633
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007634LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
7635 armnn::IWorkloadFactory& workloadFactory,
7636 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007637{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007638 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007639}
7640
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007641LayerTestResult<float, 4> L2Pooling2dSize9Test(
7642 armnn::IWorkloadFactory& workloadFactory,
7643 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007644{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007645 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007646}
7647
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007648LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
7649 armnn::IWorkloadFactory& workloadFactory,
7650 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007651{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007652 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007653}
7654
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007655LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
7656 armnn::IWorkloadFactory& workloadFactory,
7657 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007658{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007659 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007660}
7661
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007662LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
7663 armnn::IWorkloadFactory& workloadFactory,
7664 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007665{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007666 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007667}
7668
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007669LayerTestResult<float, 4> ComparePooling2dTest(
7670 armnn::IWorkloadFactory& workloadFactory,
7671 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7672 armnn::IWorkloadFactory& refWorkloadFactory,
7673 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00007674{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007675 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007676 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00007677}
7678
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007679LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
7680 armnn::IWorkloadFactory& workloadFactory,
7681 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7682 armnn::IWorkloadFactory& refWorkloadFactory,
7683 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00007684{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007685 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007686 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00007687}
7688
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007689LayerTestResult<float, 2> FullyConnectedLargeTest(
7690 armnn::IWorkloadFactory& workloadFactory,
7691 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7692 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00007693{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007694 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00007695}
7696
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007697LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
7698 armnn::IWorkloadFactory& workloadFactory,
7699 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007700{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007701 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007702}
7703
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007704LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
7705 armnn::IWorkloadFactory& workloadFactory,
7706 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007707{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007708 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
7709 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00007710}
7711
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007712LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
7713 armnn::IWorkloadFactory& workloadFactory,
7714 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007715{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007716 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007717}
7718
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007719LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
7720 armnn::IWorkloadFactory& workloadFactory,
7721 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007722{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007723 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
7724 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00007725}
7726
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007727LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
7728 armnn::IWorkloadFactory& workloadFactory,
7729 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007730{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007731 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007732}
7733
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007734LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
7735 armnn::IWorkloadFactory& workloadFactory,
7736 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007737{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007738 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
7739 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007740}
7741
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007742LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
7743 armnn::IWorkloadFactory& workloadFactory,
7744 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007745{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007746 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
7747 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007748}
7749
7750LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007751 armnn::IWorkloadFactory& workloadFactory,
7752 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007753{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007754 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
7755 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007756}
7757
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007758LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
7759 armnn::IWorkloadFactory& workloadFactory,
7760 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007761{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007762 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007763}
7764
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007765LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
7766 armnn::IWorkloadFactory& workloadFactory,
7767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007768{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007769 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
7770 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007771}
7772
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007773LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
7774 armnn::IWorkloadFactory& workloadFactory,
7775 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007776{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007777 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007778}
7779
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007780LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
7781 armnn::IWorkloadFactory& workloadFactory,
7782 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007783{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007784 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007785}
7786
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007787LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
7788 armnn::IWorkloadFactory& workloadFactory,
7789 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007790{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007791 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007792}
7793
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007794LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
7795 armnn::IWorkloadFactory& workloadFactory,
7796 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007797{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007798 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007799}
7800
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007801LayerTestResult<float, 4> SimplePermuteFloat32Test(
7802 armnn::IWorkloadFactory& workloadFactory,
7803 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007804{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007805 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007806};
7807
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007808LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
7809 armnn::IWorkloadFactory& workloadFactory,
7810 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007811{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007812 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007813};
surmeh01bceff2f2018-03-29 16:29:27 +01007814
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007815LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
7816 armnn::IWorkloadFactory& workloadFactory,
7817 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007818{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007819 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01007820};
7821
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007822LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
7823 armnn::IWorkloadFactory& workloadFactory,
7824 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007825{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007826 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01007827};
7828
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007829LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
7830 armnn::IWorkloadFactory& workloadFactory,
7831 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007832{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007833 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01007834};
7835
7836namespace
7837{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007838
narpra011e4c31d2018-09-28 11:07:51 +01007839template <typename T, std::size_t InputDim, std::size_t OutputDim>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007840LayerTestResult<T, OutputDim> MeanTestHelper(
7841 armnn::IWorkloadFactory& workloadFactory,
7842 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7843 const unsigned int* inputShape,
7844 const std::vector<T>& inputData,
7845 const std::vector<unsigned int>& axis,
7846 bool keepDims,
7847 const unsigned int* outputShape,
7848 const std::vector<T>& outputData,
7849 float scale = 1.0f,
7850 int32_t offset = 0)
narpra011e4c31d2018-09-28 11:07:51 +01007851{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007852 auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);
narpra011e4c31d2018-09-28 11:07:51 +01007853
7854 armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
7855 armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);
7856
7857 inputTensorInfo.SetQuantizationScale(scale);
7858 inputTensorInfo.SetQuantizationOffset(offset);
7859
7860 outputTensorInfo.SetQuantizationScale(scale);
7861 outputTensorInfo.SetQuantizationOffset(offset);
7862
7863 auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);
7864
7865 LayerTestResult<T, OutputDim> result(outputTensorInfo);
7866 result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);
7867
7868 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7869 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7870
7871 armnn::MeanQueueDescriptor data;
7872 data.m_Parameters.m_Axis = axis;
7873 data.m_Parameters.m_KeepDims = keepDims;
7874 armnn::WorkloadInfo info;
7875 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
7876 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7877
7878 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);
7879
7880 inputHandle->Allocate();
7881 outputHandle->Allocate();
7882
7883 CopyDataToITensorHandle(inputHandle.get(), input.origin());
7884
narpra011e4c31d2018-09-28 11:07:51 +01007885 workload->Execute();
7886
7887 CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
7888
7889 return result;
7890}
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007891
narpra011e4c31d2018-09-28 11:07:51 +01007892} // anonymous namespace
7893
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007894LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
7895 armnn::IWorkloadFactory& workloadFactory,
7896 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007897{
7898 const unsigned int inputShape[] = { 3, 2 };
7899 const unsigned int outputShape[] = { 1 };
7900
7901 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
7902 std::vector<uint8_t> output({ 2 });
7903
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007904 return MeanTestHelper<uint8_t, 2, 1>(
7905 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007906}
7907
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007908LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(
7909 armnn::IWorkloadFactory& workloadFactory,
7910 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007911{
7912 const unsigned int inputShape[] = { 1, 1, 3, 2 };
7913 const unsigned int outputShape[] = { 1, 1, 2 };
7914
7915 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
7916 std::vector<uint8_t> output({ 2, 2 });
7917
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007918 return MeanTestHelper<uint8_t, 4, 3>(
7919 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007920}
7921
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007922LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(
7923 armnn::IWorkloadFactory& workloadFactory,
7924 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007925{
7926 const unsigned int inputShape[] = { 1, 1, 3, 2 };
7927 const unsigned int outputShape[] = { 1, 1, 1, 2 };
7928
7929 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
7930 std::vector<uint8_t> output({ 2, 2 });
7931
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007932 return MeanTestHelper<uint8_t, 4, 4>(
7933 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007934}
7935
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007936LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(
7937 armnn::IWorkloadFactory& workloadFactory,
7938 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007939{
7940 const unsigned int inputShape[] = { 2, 3, 1, 2 };
7941 const unsigned int outputShape[] = { 1, 3, 1, 1 };
7942
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007943 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
narpra011e4c31d2018-09-28 11:07:51 +01007944 std::vector<uint8_t> output({ 1, 3, 5 });
7945
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007946 return MeanTestHelper<uint8_t, 4, 4>(
7947 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007948}
7949
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007950LayerTestResult<uint8_t, 1> MeanVtsUint8Test(
7951 armnn::IWorkloadFactory& workloadFactory,
7952 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007953{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007954 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01007955 const unsigned int outputShape[] = { 2 };
7956
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007957 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
7958 24 });
7959 std::vector<uint8_t> output({ 12, 13 });
narpra011e4c31d2018-09-28 11:07:51 +01007960
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007961 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, memoryManager,
7962 inputShape, input, { 0, 1 }, false, outputShape,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007963 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01007964}
7965
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007966LayerTestResult<float, 1> MeanFloatSimpleTest(
7967 armnn::IWorkloadFactory& workloadFactory,
7968 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007969{
7970 const unsigned int inputShape[] = { 3, 2 };
7971 const unsigned int outputShape[] = { 1 };
7972
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007973 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
7974 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01007975
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007976 return MeanTestHelper<float, 2, 1>(
7977 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007978}
7979
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007980LayerTestResult<float, 3> MeanFloatSimpleAxisTest(
7981 armnn::IWorkloadFactory& workloadFactory,
7982 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007983{
7984 const unsigned int inputShape[] = { 2, 3, 1, 2 };
7985 const unsigned int outputShape[] = { 3, 1, 2 };
7986
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007987 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
7988 std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
narpra011e4c31d2018-09-28 11:07:51 +01007989
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007990 return MeanTestHelper<float, 4, 3>(
7991 workloadFactory, memoryManager, inputShape, input, { 0 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007992}
7993
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007994LayerTestResult<float, 4> MeanFloatKeepDimsTest(
7995 armnn::IWorkloadFactory& workloadFactory,
7996 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007997{
7998 const unsigned int inputShape[] = { 1, 1, 3, 2 };
7999 const unsigned int outputShape[] = { 1, 1, 1, 2 };
8000
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008001 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8002 std::vector<float> output({ 2.0f, 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008003
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008004 return MeanTestHelper<float, 4, 4>(
8005 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008006}
8007
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008008LayerTestResult<float, 4> MeanFloatMultipleDimsTest(
8009 armnn::IWorkloadFactory& workloadFactory,
8010 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008011{
8012 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8013 const unsigned int outputShape[] = { 1, 3, 1, 1 };
8014
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008015 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
8016 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008017
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008018 return MeanTestHelper<float, 4, 4>(
8019 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008020}
8021
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008022LayerTestResult<float, 1> MeanVtsFloat1Test(
8023 armnn::IWorkloadFactory& workloadFactory,
8024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008025{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008026 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01008027 const unsigned int outputShape[] = { 2 };
8028
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008029 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8030 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8031 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008032
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008033 return MeanTestHelper<float, 3, 1>(
8034 workloadFactory, memoryManager, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008035}
8036
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008037LayerTestResult<float, 3> MeanVtsFloat2Test(
8038 armnn::IWorkloadFactory& workloadFactory,
8039 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008040{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008041 const unsigned int inputShape[] = { 4, 3, 2 };
8042 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01008043
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008044 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8045 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8046 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008047
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008048 return MeanTestHelper<float, 3, 3>(
8049 workloadFactory, memoryManager, inputShape, input, { 0, 2 }, true, outputShape, output);
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008050}
8051
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008052LayerTestResult<float, 3> MeanVtsFloat3Test(
8053 armnn::IWorkloadFactory& workloadFactory,
8054 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008055{
8056 const unsigned int inputShape[] = { 1, 2, 2, 1 };
8057 const unsigned int outputShape[] = { 1, 2, 1 };
8058
8059 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
8060 std::vector<float> output({ 1.5f, 3.5f });
8061
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008062 return MeanTestHelper<float, 4, 3>(
8063 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008064}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01008065
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008066LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
8067 armnn::IWorkloadFactory& workloadFactory,
8068 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01008069{
8070 // Create Initial Tensor
8071 // 1, 2, 3
8072 // 4, 5, 6
8073 // 7, 8, 9
8074
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008075 armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
8076 armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01008077
8078 boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
8079 {1, 2, 3,
8080 4, 5, 6,
8081 7, 8, 9
8082 });
8083
8084 std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
8085 workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
8086 std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
8087 workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
8088
8089 // Apply MaxPool poolSize = 1x1, stride=2x2
8090 // Result =
8091 // 1, 3
8092 // 7, 9
8093 armnn::Pooling2dDescriptor descriptor;
8094 descriptor.m_PoolHeight = 1;
8095 descriptor.m_PoolWidth = 1;
8096 descriptor.m_StrideX = 2;
8097 descriptor.m_StrideY = 2;
8098 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
8099
8100 armnn::Pooling2dQueueDescriptor queueDescriptor;
8101 queueDescriptor.m_Parameters = descriptor;
8102 armnn::WorkloadInfo workloadInfo;
8103 AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
8104 AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
8105
8106 // Create the MaxPool
8107 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
8108
8109 //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
8110 auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
8111 boost::multi_array<float, 4> resultMaxPool;
8112 resultMaxPool.resize(shape);
8113
8114
8115 // Create addition with another tensor the same size
8116 // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
8117 // with the initial tensor.
8118 // 12, 16
8119 // 24, 28
8120
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008121 armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
8122 armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01008123
8124 boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
8125 {12, 16,
8126 24, 28,
8127 });
8128
8129 // Expected output tensor after MaxPool and Addition.
8130 LayerTestResult<float,4> addRet(addOutputTensorInfo);
8131 addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
8132 {
8133 13, 19,
8134 31, 37
8135 }));
8136
8137 std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
8138 std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
8139
8140 armnn::AdditionQueueDescriptor data;
8141 armnn::WorkloadInfo info;
8142
8143 // Add the output of the MaxPool and the new tensor
8144 AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
8145 AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
8146 AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
8147
8148 std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
8149
8150 poolingInputHandle->Allocate();
8151 poolingOutputHandle->Allocate();
8152 addInputHandle->Allocate();
8153 addOutputHandle->Allocate();
8154
8155 CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
8156 CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
8157
8158 CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
8159 CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
8160
8161 workload->Execute();
8162 addWorkload->Execute();
8163
8164 CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
8165
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01008166 return addRet;
8167}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008168
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008169LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
8170 armnn::IWorkloadFactory& workloadFactory,
8171 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008172{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008173 return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008174}
8175
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008176LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
8177 armnn::IWorkloadFactory& workloadFactory,
8178 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008179{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008180 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008181}
8182
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008183LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
8184 armnn::IWorkloadFactory& workloadFactory,
8185 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008186{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008187 return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008188}
8189
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008190LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
8191 armnn::IWorkloadFactory& workloadFactory,
8192 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008193{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008194 return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008195}
8196
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008197LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
8198 armnn::IWorkloadFactory& workloadFactory,
8199 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008200{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008201 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008202}
8203
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008204LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
8205 armnn::IWorkloadFactory& workloadFactory,
8206 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008207{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008208 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008209}
8210
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008211LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
8212 armnn::IWorkloadFactory& workloadFactory,
8213 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008214{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008215 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008216}
8217
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008218LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
8219 armnn::IWorkloadFactory& workloadFactory,
8220 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008221{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008222 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008223}
8224
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008225LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
8226 armnn::IWorkloadFactory& workloadFactory,
8227 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008228{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008229 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008230}
8231
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008232LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
8233 armnn::IWorkloadFactory& workloadFactory,
8234 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008235{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008236 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008237}
8238
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008239LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
8240 armnn::IWorkloadFactory& workloadFactory,
8241 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008242{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008243 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008244}
8245
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008246LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
8247 armnn::IWorkloadFactory& workloadFactory,
8248 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008249{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008250 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008251}
8252
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008253LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
8254 armnn::IWorkloadFactory& workloadFactory,
8255 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008256{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008257 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008258}
8259
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008260LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
8261 armnn::IWorkloadFactory& workloadFactory,
8262 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008263{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008264 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008265}
8266
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008267LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
8268 armnn::IWorkloadFactory& workloadFactory,
8269 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008270{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008271 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008272}
8273
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008274LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
8275 armnn::IWorkloadFactory& workloadFactory,
8276 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008277{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008278 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008279}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008280
namespace {

// Generic driver for the BatchToSpaceNd workload tests below.
//
// Builds input/output TensorInfos from the raw shape arrays, creates a
// BatchToSpaceNd workload through the given factory, executes it once, and
// returns a LayerTestResult holding the actual and expected outputs.
//
// T selects the armnn data type: uint8_t maps to QuantisedAsymm8, anything
// else to Float32. scale/offset are applied to both tensor infos
// unconditionally (they only matter for the quantised case).
// NOTE(review): memoryManager is accepted for signature consistency with the
// other test helpers but is never referenced by this implementation.
template<typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout& dataLayout,
    const unsigned int *inputShape,
    const std::vector<T> &inputData,
    const std::vector<unsigned int> &blockShape,
    const std::vector<std::pair<unsigned int, unsigned int>> &crops,
    const unsigned int *outputShape,
    const std::vector<T> &outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Derive the armnn data type from the element type T.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    // Expected output is stored alongside the actual output for the caller's
    // comparison.
    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchToSpaceNdQueueDescriptor data;
    data.m_Parameters.m_DataLayout = dataLayout;
    data.m_Parameters.m_BlockShape = blockShape;
    data.m_Parameters.m_Crops = crops;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);

    // Allocate, upload, execute, download — strictly in this order.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

} // anonymous namespace
8339
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008340LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
8341 armnn::IWorkloadFactory& workloadFactory,
8342 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008343{
8344 const unsigned int inputShape[] = {4, 2, 2, 1};
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008345 const unsigned int outputShape[] = {1, 4, 4, 1};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008346
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008347 std::vector<float> input({
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008348 // Batch 0, Height 0, Width (2) x Channel (1)
8349 1.0f, 3.0f,
8350 // Batch 0, Height 1, Width (2) x Channel (1)
8351 9.0f, 11.0f,
8352
8353
8354 // Batch 1, Height 0, Width (2) x Channel (1)
8355 2.0f, 4.0f,
8356 // Batch 1, Height 1, Width (2) x Channel (1)
8357 10.0f, 12.0f,
8358
8359
8360 // Batch 2, Height 0, Width (2) x Channel (1)
8361 5.0f, 7.0f,
8362 // Batch 2, Height 1, Width (2) x Channel (1)
8363 13.0f, 15.0f,
8364
8365 // Batch 3, Height 0, Width (2) x Channel (3)
8366 6.0f, 8.0f,
8367 // Batch 3, Height 1, Width (2) x Channel (1)
8368 14.0f, 16.0f
8369 });
8370
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008371 std::vector<float> expectedOutput({
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008372 1.0f, 2.0f, 3.0f, 4.0f,
8373 5.0f, 6.0f, 7.0f, 8.0f,
8374 9.0f, 10.0f, 11.0f, 12.0f,
8375 13.0f, 14.0f, 15.0f, 16.0f
8376 });
8377
8378 std::vector<unsigned int> blockShape {2, 2};
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00008379 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008380
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008381 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
8382 armnn::DataLayout::NHWC, inputShape, input, blockShape,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008383 crops, outputShape, expectedOutput);
8384}
8385
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008386LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
8387 armnn::IWorkloadFactory& workloadFactory,
8388 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008389{
8390 const unsigned int inputShape[] = {4, 1, 1, 1};
8391 const unsigned int outputShape[] = {1, 2, 2, 1};
8392
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008393 std::vector<float> input({
8394 // Batch 0, Height 0, Width (2) x Channel (1)
8395 1.0f, 2.0f, 3.0f, 4.0f
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008396 });
8397
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008398 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008399
8400 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00008401 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008402
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008403 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
8404 armnn::DataLayout::NHWC, inputShape, input, blockShape,
8405 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008406}
8407
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008408LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
8409 armnn::IWorkloadFactory& workloadFactory,
8410 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008411{
8412 const unsigned int inputShape[] = {4, 1, 1, 3};
8413 const unsigned int outputShape[] = {1, 2, 2, 3};
8414
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008415 std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008416
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008417 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008418
8419 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00008420 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008421
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008422 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
8423 armnn::DataLayout::NHWC, inputShape, input, blockShape,
8424 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008425}
8426
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008427LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test4(
8428 armnn::IWorkloadFactory& workloadFactory,
8429 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8430{
8431 const unsigned int inputShape[] = {8, 1, 3, 1};
8432 const unsigned int outputShape[] = {2, 2, 4, 1};
8433
8434 std::vector<float> input({
8435 0.0f, 1.0f, 3.0f,
8436 0.0f, 9.0f, 11.0f,
8437 0.0f, 2.0f, 4.0f,
8438 0.0f, 10.0f, 12.0f,
8439 0.0f, 5.0f, 7.0f,
8440 0.0f, 13.0f, 15.0f,
8441 0.0f, 6.0f, 8.0f,
8442 0.0f, 14.0f, 16.0f
8443 });
8444
8445 std::vector<float> expectedOutput({
8446 1.0f, 2.0f, 3.0f, 4.0f,
8447 5.0f, 6.0f, 7.0f, 8.0f,
8448 9.0f, 10.0f, 11.0f, 12.0f,
8449 13.0f, 14.0f, 15.0f, 16.0f
8450 });
8451
8452 std::vector<unsigned int> blockShape({2, 2});
8453 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};
8454
8455 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
8456 armnn::DataLayout::NHWC, inputShape, input, blockShape,
8457 crops, outputShape, expectedOutput);
8458}
8459
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008460LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
8461 armnn::IWorkloadFactory &workloadFactory,
8462 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008463{
8464 const unsigned int inputShape[] = {4, 3, 1, 1};
8465 const unsigned int outputShape[] = {1, 3, 2, 2};
8466
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008467 std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008468
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008469 std::vector<float> expectedOutput({
8470 // Batch 0, Channel 0, Height (2) x Width (2)
8471 1.0f, 4.0f,
8472 7.0f, 10.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008473
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008474 // Batch 0, Channel 1, Height (2) x Width (2)
8475 2.0f, 5.0f,
8476 8.0f, 11.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008477
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008478 // Batch 0, Channel 2, Height (2) x Width (2)
8479 3.0f, 6.0f,
8480 9.0f, 12.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008481 });
8482
8483 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00008484 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008485
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008486 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
8487 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8488 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008489}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00008490
Mike Kelly831faed2018-11-28 11:52:08 +00008491LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test2(
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008492 armnn::IWorkloadFactory& workloadFactory,
8493 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mike Kelly831faed2018-11-28 11:52:08 +00008494{
8495 const unsigned int inputShape[] = {4, 1, 1, 1};
8496 const unsigned int outputShape[] = {1, 1, 2, 2};
8497
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008498 std::vector<float> input({
8499 // Batch 0, Height 0, Width (2) x Channel (1)
8500 1.0f, 2.0f, 3.0f, 4.0f
8501 });
Mike Kelly831faed2018-11-28 11:52:08 +00008502
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008503 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
Mike Kelly831faed2018-11-28 11:52:08 +00008504
8505 std::vector<unsigned int> blockShape({2, 2});
8506 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8507
8508 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
8509 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8510 crops, outputShape, expectedOutput);
8511}
8512
8513LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test3(
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008514 armnn::IWorkloadFactory& workloadFactory,
8515 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mike Kelly831faed2018-11-28 11:52:08 +00008516{
8517 const unsigned int inputShape[] = {4, 3, 1, 1};
8518 const unsigned int outputShape[] = {1, 3, 2, 2};
8519
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008520 std::vector<float> input({1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f});
Mike Kelly831faed2018-11-28 11:52:08 +00008521
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008522 std::vector<float> expectedOutput({
8523 // Batch 0, Channel 0, Height (2) x Width (2)
8524 1.0f, 7.0f,
8525 2.0f, 8.0f,
Mike Kelly831faed2018-11-28 11:52:08 +00008526
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008527 // Batch 0, Channel 1, Height (2) x Width (2)
8528 3.0f, 9.0f,
8529 4.0f, 10.0f,
Mike Kelly831faed2018-11-28 11:52:08 +00008530
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008531 // Batch 0, Channel 2, Height (2) x Width (2)
8532 5.0f, 11.0f,
8533 6.0f, 12.0f,
8534 });
Mike Kelly831faed2018-11-28 11:52:08 +00008535
8536 std::vector<unsigned int> blockShape({2, 2});
8537 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8538
8539 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
8540 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8541 crops, outputShape, expectedOutput);
8542}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00008543
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008544LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
8545 armnn::IWorkloadFactory& workloadFactory,
8546 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00008547{
8548 const unsigned int inputShape[] = {4, 2, 2, 1};
8549 const unsigned int outputShape[] = {1, 4, 4, 1};
8550
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008551 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
8552 std::vector<uint8_t> expectedOutput({1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00008553
8554 std::vector<unsigned int> blockShape({2, 2});
8555 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8556
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00008557 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
8558 input, blockShape, crops, outputShape, expectedOutput);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008559}
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00008560
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008561LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
8562 armnn::IWorkloadFactory& workloadFactory,
8563 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8564{
8565 const unsigned int inputShape[] = {4, 1, 1, 1};
8566 const unsigned int outputShape[] = {1, 2, 2, 1};
8567
8568 std::vector<uint8_t> input({
8569 // Batch 0, Height 0, Width (2) x Channel (1)
8570 1, 2, 3, 4
8571 });
8572
8573 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
8574
8575 std::vector<unsigned int> blockShape({2, 2});
8576 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8577
8578 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8579 armnn::DataLayout::NHWC, inputShape, input, blockShape,
8580 crops, outputShape, expectedOutput);
8581}
8582
8583LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest3(
8584 armnn::IWorkloadFactory& workloadFactory,
8585 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8586{
8587 const unsigned int inputShape[] = {4, 1, 1, 3};
8588 const unsigned int outputShape[] = {1, 2, 2, 3};
8589
8590 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
8591
8592 std::vector<uint8_t> expectedOutput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
8593
8594 std::vector<unsigned int> blockShape({2, 2});
8595 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8596
8597 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8598 armnn::DataLayout::NHWC, inputShape, input, blockShape,
8599 crops, outputShape, expectedOutput);
8600}
8601
8602
8603LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest1(
8604 armnn::IWorkloadFactory &workloadFactory,
8605 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8606{
8607 const unsigned int inputShape[] = {4, 3, 1, 1};
8608 const unsigned int outputShape[] = {1, 3, 2, 2};
8609
8610 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
8611
8612 std::vector<uint8_t> expectedOutput({
8613 // Batch 0, Channel 0, Height (2) x Width (2)
8614 1, 4,
8615 7, 10,
8616
8617 // Batch 0, Channel 1, Height (2) x Width (2)
8618 2, 5,
8619 8, 11,
8620
8621 // Batch 0, Channel 2, Height (2) x Width (2)
8622 3, 6,
8623 9, 12,
8624 });
8625
8626 std::vector<unsigned int> blockShape({2, 2});
8627 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8628
8629 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8630 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8631 crops, outputShape, expectedOutput);
8632}
8633
8634LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest2(
8635 armnn::IWorkloadFactory& workloadFactory,
8636 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8637{
8638 const unsigned int inputShape[] = {4, 1, 1, 1};
8639 const unsigned int outputShape[] = {1, 1, 2, 2};
8640
8641 std::vector<uint8_t> input({
8642 // Batch 0, Height 0, Width (2) x Channel (1)
8643 1, 2, 3, 4
8644 });
8645
8646 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
8647
8648 std::vector<unsigned int> blockShape({2, 2});
8649 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8650
8651 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8652 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8653 crops, outputShape, expectedOutput);
8654}
8655
8656LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest3(
8657 armnn::IWorkloadFactory& workloadFactory,
8658 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8659{
8660 const unsigned int inputShape[] = {4, 3, 1, 1};
8661 const unsigned int outputShape[] = {1, 3, 2, 2};
8662
8663 std::vector<uint8_t> input({1, 3, 5, 7, 9, 11, 2, 4, 6, 8, 10, 12});
8664
8665 std::vector<uint8_t> expectedOutput({
8666 // Batch 0, Channel 0, Height (2) x Width (2)
8667 1, 7,
8668 2, 8,
8669
8670 // Batch 0, Channel 1, Height (2) x Width (2)
8671 3, 9,
8672 4, 10,
8673
8674 // Batch 0, Channel 2, Height (2) x Width (2)
8675 5, 11,
8676 6, 12,
8677 });
8678
8679 std::vector<unsigned int> blockShape({2, 2});
8680 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8681
8682 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8683 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8684 crops, outputShape, expectedOutput);
8685}
8686
8687LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest4(
8688 armnn::IWorkloadFactory& workloadFactory,
8689 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8690{
8691 const unsigned int inputShape[] = {8, 1, 1, 3};
8692 const unsigned int outputShape[] = {2, 1, 2, 4};
8693
8694 std::vector<uint8_t> input({
8695 0, 1, 3, 0, 9, 11,
8696 0, 2, 4, 0, 10, 12,
8697 0, 5, 7, 0, 13, 15,
8698 0, 6, 8, 0, 14, 16
8699 });
8700
8701 std::vector<uint8_t> expectedOutput({
8702 1, 2, 3, 4,
8703 5, 6, 7, 8,
8704 9, 10, 11, 12,
8705 13, 14, 15, 16
8706 });
8707
8708 std::vector<unsigned int> blockShape({2, 2});
8709 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};
8710
8711 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8712 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8713 crops, outputShape, expectedOutput);
8714}
8715
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00008716LayerTestResult<float, 4> StridedSlice4DFloat32Test(
8717 armnn::IWorkloadFactory& workloadFactory,
8718 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8719{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008720 return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00008721}
8722
// Dispatches to the templated 4D reverse StridedSlice test for Float32.
LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8729
// Dispatches to the templated simple-stride StridedSlice test for Float32.
LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8736
// Dispatches to the templated range-mask StridedSlice test for Float32.
LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8743
// Dispatches to the templated shrink-axis-mask StridedSlice test for Float32
// (the shrink mask reduces the result to 2 dimensions).
LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8750
// Dispatches to the templated 3D StridedSlice test implementation for Float32.
LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8757
// Dispatches to the templated 3D reverse StridedSlice test for Float32.
LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8764
// Dispatches to the templated 2D StridedSlice test implementation for Float32.
LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8771
// Dispatches to the templated 2D reverse StridedSlice test for Float32.
LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8778
// Dispatches to the templated 4D StridedSlice test for quantised asymmetric uint8.
LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8785
// Dispatches to the templated 4D reverse StridedSlice test for quantised uint8.
LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8792
// Dispatches to the templated simple-stride StridedSlice test for quantised uint8.
LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8799
// Dispatches to the templated range-mask StridedSlice test for quantised uint8.
LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8806
// Dispatches to the templated shrink-axis-mask StridedSlice test for quantised
// uint8 (the shrink mask reduces the result to 2 dimensions).
LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8813
// Dispatches to the templated 3D StridedSlice test for quantised uint8.
LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8820
// Dispatches to the templated 3D reverse StridedSlice test for quantised uint8.
LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8827
// Dispatches to the templated 2D StridedSlice test for quantised uint8.
LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8834
// Dispatches to the templated 2D reverse StridedSlice test for quantised uint8.
LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00008841
// Dispatches to the templated 4D Debug-layer test implementation for Float32.
LayerTestResult<float, 4> Debug4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8848
// Dispatches to the templated 3D Debug-layer test implementation for Float32.
LayerTestResult<float, 3> Debug3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8855
// Dispatches to the templated 2D Debug-layer test implementation for Float32.
LayerTestResult<float, 2> Debug2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8862
// Dispatches to the templated 1D Debug-layer test implementation for Float32.
LayerTestResult<float, 1> Debug1DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8869
// Dispatches to the templated 4D Debug-layer test for quantised asymmetric uint8.
LayerTestResult<uint8_t, 4> Debug4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8876
// Dispatches to the templated 3D Debug-layer test for quantised asymmetric uint8.
LayerTestResult<uint8_t, 3> Debug3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8883
// Dispatches to the templated 2D Debug-layer test for quantised asymmetric uint8.
LayerTestResult<uint8_t, 2> Debug2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8890
// Dispatches to the templated 1D Debug-layer test for quantised asymmetric uint8.
LayerTestResult<uint8_t, 1> Debug1DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Matteo Martincigh49124022019-01-11 13:25:59 +00008897
narpra014951d842019-01-18 16:53:53 +00008898LayerTestResult<float, 1> Gather1DParamsFloatTest(
8899 armnn::IWorkloadFactory& workloadFactory,
8900 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8901{
8902 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
8903}
8904
// Dispatches to the Gather test with 1D params, instantiated for quantised uint8.
LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8911
// Dispatches to the Gather test with multi-dimensional params, for Float32.
LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8918
// Dispatches to the Gather test with multi-dimensional params, for quantised uint8.
LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8925
// Dispatches to the Gather test with multi-dimensional params AND indices, for Float32.
LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8932
// Dispatches to the Gather test with multi-dimensional params AND indices, for
// quantised asymmetric uint8.
LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}