blob: 74f39971336d916c0b85b97df2afc1f199ee562e [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008#include "TypeUtils.hpp"
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
33#include "ReshapeTestImpl.hpp"
34#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000035#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000036#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
41#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010042#include "LstmTestImpl.hpp"
43#include "ConvertFp16ToFp32TestImpl.hpp"
44#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000045#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000046#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010047#include "QuantizeTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Stored channel-major (CHW): three 8-row x 16-column planes, concatenated.
static std::vector<float> ConvInput3x8x16({
    // Channel 0: rows of 0.5 with the second row zeroed (horizontal feature).
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    // Channel 1: a single vertical line of ones at column 2 (vertical feature).
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    // Channel 2: constant -1 everywhere (uniform negative plane).
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});
79
telsoa01c577f2c2018-08-31 09:22:23 +010080// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000081template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
telsoa014fcda012018-03-09 14:13:49 +000082boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
83{
84 if(biasEnabled)
85 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000086 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +000087 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
88 return bias;
89 }
90 else
91 {
92 return boost::multi_array<T, 1>();
93 }
94}
95
// Convolves the shared 3-channel 16x8 image with a batch of two 3-channel 3x5
// kernels and compares the result against precomputed expected values.
// qScale/qOffset quantize the test data; biasEnabled selects the shared Bias2.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Kernel 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 0, channel 1 (all zeros — ignores the vertical line).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Kernel 1, channel 0 (all zeros).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 1, channel 2 (all zeros).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
179
// Convolves the shared 3-channel 16x8 image with a batch of two 3-channel 3x3
// kernels and compares against precomputed values. The 3x3 shape exercises
// ArmCompute's direct convolution path.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Kernel 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            // Kernel 0, channel 1 (zeros).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Kernel 1, channel 0 (zeros).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 1, channel 2 (zeros).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
256
// Convolves a single-batch single-channel 3x4 image (given in the NHWC shape
// {1, 3, 4, 1}) with one 3x3 kernel and compares against precomputed values.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Single-batch, single-channel 3x4 input image (shape {1, 3, 4, 1}).

    armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3,
        8, 7, 3, 6,
        3, 3, 9, 1
    });


    // A single 3x3 single-channel kernel (shape {1, 3, 3, 1}).
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output has the same 3x4 shape as the input.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // NOTE(review): biasEnabled is unused — an empty bias tensor is always
    // passed below. Confirm whether a bias-enabled variant was intended.
    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset);
}
308
// Convolves a single-batch single-channel 5x5 image with a 3x3 kernel using
// stride 2x2 and symmetric 1-pixel padding, comparing against precomputed
// values.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3, 5,
        8, 7, 3, 6, 3,
        3, 3, 9, 1, 9,
        4, 1, 8, 1, 3,
        6, 8, 1, 9, 2
    });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
    {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is a single-batch, 1 channel, 3x3 image
    // (5x5 input, pad 1 on every side, stride 2 -> 3x3 output).
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
    {
        23, 33, 24,
        91, 99, 48,
        26, 50, 19
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX = 2;
    uint32_t strideY = 2;

    // NOTE(review): biasEnabled is unused — an empty bias tensor is always
    // passed below. Confirm whether a bias-enabled variant was intended.
    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
374
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000375LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
376 armnn::IWorkloadFactory& workloadFactory,
377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
378 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000379 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000380{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000381 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
382 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000383}
384
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000385LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
386 armnn::IWorkloadFactory& workloadFactory,
387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
388 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000389 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000390{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000391 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
392 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000393}
394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000395LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
396 armnn::IWorkloadFactory& workloadFactory,
397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
398 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000399 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000401 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
402 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000403}
404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000405LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
406 armnn::IWorkloadFactory& workloadFactory,
407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
408 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000410 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
411 workloadFactory,
412 memoryManager,
413 0.f,
414 0,
415 biasEnabled,
416 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100417}
418
Mike Kelly7332ed82018-12-20 17:03:06 +0000419LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
420 armnn::IWorkloadFactory& workloadFactory,
421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
422 bool biasEnabled,
423 const armnn::DataLayout layout)
424{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000425 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
426 workloadFactory,
427 memoryManager,
428 0.f,
429 0,
430 biasEnabled,
431 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000432}
433
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000434LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
435 armnn::IWorkloadFactory& workloadFactory,
436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
437 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000438 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000440 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
441 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000442}
443
// Tests Convolution2d with asymmetric padding (left=1, top=2, right=3,
// bottom=4) that is larger than half the 2x2 kernel size on some edges,
// so several output rows/columns are computed entirely from padding zeros.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale, qOffset), // Bias disabled for this test.
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        2, // Padding top.
        3, // Padding right.
        4); // Padding bottom.
}
507
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000508template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
509 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000510LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
511 armnn::IWorkloadFactory& workloadFactory,
512 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000513 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000514 float qScale,
515 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000516{
telsoa01c577f2c2018-08-31 09:22:23 +0100517 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000518 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000519 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
520 QuantizedVector<T>(qScale, qOffset, {
521 11,21,31,41,51,
522 12,22,32,42,52,
523 13,23,33,43,53,
524 14,24,34,44,54,
525 15,25,35,45,55,
526 })));
527
telsoa01c577f2c2018-08-31 09:22:23 +0100528 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000529 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000530 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
531 QuantizedVector<T>(qScale, qOffset, {
532 -11,-21,-31,-41,
533 -12,-22,-32,-42,
534 -13,-23,-33,-43,
535 -14,-24,-34,-44,
536 })));
537
telsoa01c577f2c2018-08-31 09:22:23 +0100538 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000539 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000540 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
541 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
542 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000543 -7140, -10580, -13940, -9300, -5230,
544 -9590, -14120, -18520, -12290, -6860,
545 -9980, -14560, -18960, -12560, -7000,
546 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100547 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000548 })));
549
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000550 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
551 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000552 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000553 input,
554 kernel,
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000555 GetBias2<ArmnnBType>(false, qScale, qOffset),
telsoa014fcda012018-03-09 14:13:49 +0000556 expectedOutput,
557 qScale,
558 qOffset,
narpra015f703182018-10-26 16:24:58 +0100559 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100560 1, // Padding left.
561 1, // Padding top.
562 2, // Padding right.
563 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100564}
565
// Tests DepthwiseConvolution2d with asymmetric padding (left=1, top=1,
// right=2, bottom=2), depth multiplier 1, stride 1x1, on a 2-channel 5x5
// input with a 2-channel 4x4 kernel.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    // NOTE(review): the data below is quantized with the TensorInfo's own
    // scale/offset (defaults), not the qScale/qOffset parameters — confirm
    // this is intentional; qScale/qOffset are still used for the bias and
    // are forwarded to the implementation.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 1, 2, 3, 4,
            5, 6, 7, 8, 9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,
            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
642
// NHWC variant of the asymmetric-padding depthwise convolution test: same
// 2-channel 5x5 data as DepthwiseConvolution2dAsymmetricTestCommon, but the
// input/output tensors are laid out channels-last ({1, 5, 5, 2}).
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // NOTE(review): as in the NCHW variant, the data below is quantized with
    // the TensorInfo's own scale/offset (defaults), not the qScale/qOffset
    // parameters — confirm this is intentional.
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 25,
            1, 26,
            2, 27,
            3, 28,
            4, 29,

            5, 30,
            6, 31,
            7, 32,
            8, 33,
            9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    // Kernel stays in the {1, 2, 4, 4} layout (not NHWC).
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output: same values as the NCHW variant, interleaved per pixel.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
750
telsoa014fcda012018-03-09 14:13:49 +0000751LayerTestResult<float, 4>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000752Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
753 armnn::IWorkloadFactory& workloadFactory,
754 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000755 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000756{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000757 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
758 <armnn::DataType::Float32, armnn::DataType::Float32>(
759 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000760}
761
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000762LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
763 armnn::IWorkloadFactory& workloadFactory,
764 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000765 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000766{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000767 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000768 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000769}
770
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000771LayerTestResult<float, 4> DepthwiseConvolution2dTest(
772 armnn::IWorkloadFactory& workloadFactory,
773 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
774 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000775 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000776{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000777 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000778 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000779}
780
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000781LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
782 armnn::IWorkloadFactory& workloadFactory,
783 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
784 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100785{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000786 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
787 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100788}
789
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000790LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
791 armnn::IWorkloadFactory& workloadFactory,
792 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
793 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000794 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000795{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000796 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000797 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000798}
799
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000800LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
801 armnn::IWorkloadFactory& workloadFactory,
802 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
803 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000804 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100805{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000806 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000807 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +0100808}
809
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000810LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
811 armnn::IWorkloadFactory& workloadFactory,
812 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
813 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000814 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000815{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000816 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000817 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000818}
819
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000820LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
821 armnn::IWorkloadFactory& workloadFactory,
822 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
823 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000824 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000825{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000826 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000827 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000828}
829
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000830LayerTestResult<float, 4> Convolution1dTest(
831 armnn::IWorkloadFactory& workloadFactory,
832 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
833 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000834{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000835 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
836 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000837}
838
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000839LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
840 armnn::IWorkloadFactory& workloadFactory,
841 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
842 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000843{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000844 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
845 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000846}
847
// Compares a Float32 2D convolution run on the backend under test against the
// same workload run on refWorkloadFactory (typically the reference backend).
LayerTestResult<float,4> CompareConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory);
}
856
// Compares a Float32 depthwise convolution on the backend under test against
// the same workload on refWorkloadFactory, using the given data layout.
LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::DataLayout layout)
{
    return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, layout);
}
866
// Compares a quantized (asymmetric uint8) depthwise convolution on the
// backend under test against the same workload on refWorkloadFactory.
LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::DataLayout layout)
{
    return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, refWorkloadFactory, layout);
}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000876
877LayerTestResult<float,4> SimpleNormalizationAcrossTest(
878 armnn::IWorkloadFactory& workloadFactory,
879 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000880{
881 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
882 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000883 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000884}
885
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000886LayerTestResult<float,4> SimpleNormalizationWithinTest(
887 armnn::IWorkloadFactory& workloadFactory,
888 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000889{
890 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
891 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000892 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000893}
894
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000895LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
896 armnn::IWorkloadFactory& workloadFactory,
897 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +0100898{
899 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
900 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000901 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +0100902}
903
// Softmax over a rank-2 Float32 tensor; beta is forwarded to the
// implementation (softmax scaling parameter).
LayerTestResult<float,2> SimpleSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
}
911
// Softmax over a rank-3 Float32 tensor; beta is forwarded to the
// implementation (softmax scaling parameter).
LayerTestResult<float,3> Simple3dSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
}
919
// Softmax over a rank-4 Float32 tensor; beta is forwarded to the
// implementation (softmax scaling parameter).
LayerTestResult<float,4> Simple4dSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
}
927
// Softmax over a rank-2 quantized (asymmetric uint8) tensor.
LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
}
935
// Softmax over a rank-3 quantized (asymmetric uint8) tensor.
LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
}
943
// Softmax over a rank-4 quantized (asymmetric uint8) tensor.
LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
}
951
// Compares a normalization workload on the backend under test against the
// same workload on refWorkloadFactory, for the given channel mode and method.
LayerTestResult<float,4> CompareNormalizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
}
961
// Compares a Float32 softmax on the backend under test against the same
// workload on refWorkloadFactory.
LayerTestResult<float,2> CompareSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float beta)
{
    return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, beta);
}
971
// Compares a quantized (asymmetric uint8) softmax on the backend under test
// against the same workload on refWorkloadFactory.
LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float beta)
{
    return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, refWorkloadFactory, beta);
}
981
// Float32 splitter test; returns one result per output view.
std::vector<LayerTestResult<float,3>> SplitterTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
988
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000989std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
990 armnn::IWorkloadFactory& workloadFactory,
991 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000992{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000993 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000994}
995
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000996LayerTestResult<float, 3> CopyViaSplitterTest(
997 armnn::IWorkloadFactory& workloadFactory,
998 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000999{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001000 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001001}
1002
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001003LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1004 armnn::IWorkloadFactory& workloadFactory,
1005 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001006{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001007 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001008}
1009
// LSTM layer test: CIFG enabled, peephole enabled, no projection.
// Feeds a fixed 2x2 input batch and checks against golden output values
// (computed offline) via the shared implementation.
LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: batch of 2, input size 2.
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            { 2., 3., 3., 4. }));

    // Expected output: batch of 2, output size 4 (golden reference values).
    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
        workloadFactory, memoryManager, input, expectedOutput);
}
1025
// LSTM layer test: CIFG disabled, peephole enabled, projection enabled.
// Feeds a fixed 2x5 input batch and checks against golden output values
// (computed offline) via the shared implementation.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: batch of 2, input size 5.
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    // Expected output: batch of 2, (projected) output size 16.
    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
             0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, memoryManager, input, expectedOutput);
}
1046
// LSTM layer test: CIFG disabled, no peephole, no projection.
// Feeds a fixed 2x2 input batch and checks against golden output values
// (computed offline) via the shared implementation.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: batch of 2, input size 2.
    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {2., 3., 3., 4.}));


    // Expected output: batch of 2, output size 4 (golden reference values).
    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
              -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl(
        workloadFactory, memoryManager, input, expectedOutput);
}
1064
// Merger (concatenation) test: concatenates a 2-channel and a 1-channel
// CHW tensor along the channel dimension into a 3-channel output.
// Exercises sub-tensor handles when the factory supports them, otherwise
// falls back to independent tensor handles plus view origins.
LayerTestResult<float,3> MergerTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors (rank-3, CHW ordering of dimensions).
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's channel.
    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    // Second view starts at channel 2, i.e. just after input1's two channels.
    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // When supported, the inputs are created as sub-tensors of the output so
    // the merge happens without an explicit copy.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
1186
// Element-wise addition test on two equally-shaped 2x2x2x3 Float32 tensors.
// Builds the workload directly via the factory, executes it, and returns the
// actual vs. expected output for comparison by the caller.
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    // Both inputs and the output share the same shape (no broadcasting here).
    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    // Expected output is the element-wise sum of input1 and input2.
    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1277
// Element-wise addition with full broadcasting: a {1,3,2,1} tensor is added
// to a {1,1,2,3} tensor, producing a {1,3,2,3} output. For quantized types,
// the same qScale/qOffset are applied to both inputs and the output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantization parameters only apply to quantized element types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    // Expected output: every input1 value broadcast-added to each input2 row.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1355
// Runs an Addition workload that broadcasts a single scalar (input2, shape
// 1x1x1x1) across every element of a 1x3x2x3 tensor (input1), and checks the
// result against precomputed expected values.
//
// workloadFactory  backend factory used to create tensor handles and the workload.
// memoryManager    unused by this implementation (kept for signature uniformity).
// qScale/qOffset   quantization parameters; only applied when T is a quantized type.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantization parameters are only meaningful for quantized data types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
    {
         0.0f,  1.0f,  2.0f,
         3.0f,  4.0f,  5.0f,
         6.0f,  7.0f,  8.0f,
         9.0f, 10.0f, 11.0f,
        12.0f, 13.0f, 14.0f,
        15.0f, 16.0f, 17.0f,
    }));

    // The single value broadcast across every element of input1.
    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
    {
        0.5f,
    }));

    // Expected result: input1 + 0.5, elementwise.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
    {
         0.5f,  1.5f,  2.5f,
         3.5f,  4.5f,  5.5f,
         6.5f,  7.5f,  8.5f,
         9.5f, 10.5f, 11.5f,
        12.5f, 13.5f, 14.5f,
        15.5f, 16.5f, 17.5f,
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Inputs are added in operand order: input1 first, then the broadcast scalar.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Handles must be allocated before any data is copied into them.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    // Read the computed result back; outputExpected was filled in above.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1428
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001429LayerTestResult<float, 4> AdditionBroadcastTest(
1430 armnn::IWorkloadFactory& workloadFactory,
1431 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001432{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001433 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
1434 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001435}
1436
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001437LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1438 armnn::IWorkloadFactory& workloadFactory,
1439 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001440{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001441 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
1442 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001443}
1444
Sadik Armagan2999a022019-04-09 14:20:12 +01001445LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
1446 armnn::IWorkloadFactory& workloadFactory,
1447 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1448{
1449 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
1450 workloadFactory, memoryManager, 2.f, 0);
1451}
1452
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001453LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1454 armnn::IWorkloadFactory& workloadFactory,
1455 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001456{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001457 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
1458 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001459}
1460
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001461LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1462 armnn::IWorkloadFactory& workloadFactory,
1463 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001464{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001465 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
1466 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001467}
1468
Sadik Armagan2999a022019-04-09 14:20:12 +01001469LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
1470 armnn::IWorkloadFactory& workloadFactory,
1471 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1472{
1473 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
1474 workloadFactory, memoryManager, 0.1333333f, 0);
1475}
1476
// Cross-backend consistency check: runs an identical addition with random
// input data on the backend under test (workloadFactory) and on a reference
// backend (refWorkloadFactory). ret.output holds the result from the backend
// under test and ret.outputExpected the reference backend's result, so the
// caller's comparison verifies that the two backends agree.
// memoryManager is unused by this implementation.
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // 1232/456 are presumably RNG seeds, making the "random" data
    // reproducible across runs — TODO confirm against MakeRandomTensor.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // One set of tensor handles per backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Clone the descriptor/info for the reference backend, then rebind the
    // tensor handles to the reference-backend ones.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    // Allocate every handle before copying any data in.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive byte-identical input data.
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
1544
namespace {
// Generic driver for Division layer tests: builds two input tensors and an
// expected output tensor (each with its own quantization scale/offset), runs
// a Division workload on the given factory, and returns the computed result
// alongside the expected values for the caller to compare.
//
// Quantization info is set unconditionally here (unlike the Addition impls),
// so float callers pass scale 1.0f / offset 0.
// memoryManager is unused by this implementation.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DivisionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Input order matters: input0 is the dividend, input1 the divisor.
    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    // Handles must be allocated before data is copied into them.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
1608
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001609LayerTestResult<float,4> DivisionByZeroTest(
1610 armnn::IWorkloadFactory& workloadFactory,
1611 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001612{
1613 const unsigned int width = 2;
1614 const unsigned int height = 2;
1615 const unsigned int channelCount = 2;
1616 const unsigned int batchSize = 2;
1617
1618 unsigned int shape[] = { batchSize, channelCount, height, width };
1619
1620 std::vector<float> input0({
1621 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1622 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1623
1624 std::vector<float> input1({
1625 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1626 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1627
1628 std::vector<float> output({
1629 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1630 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1631
Sadik Armagan2999a022019-04-09 14:20:12 +01001632 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1633 memoryManager,
1634 shape, input0, 1.0f, 0,
1635 shape, input1, 1.0f, 0,
1636 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001637}
1638
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001639LayerTestResult<float,4> DivisionTest(
1640 armnn::IWorkloadFactory& workloadFactory,
1641 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001642{
1643 const unsigned int width = 2;
1644 const unsigned int height = 2;
1645 const unsigned int channelCount = 2;
1646 const unsigned int batchSize = 2;
1647
1648 unsigned int shape[] = { batchSize, channelCount, height, width };
1649
1650 std::vector<float> input0({
1651 2, 2, 2, 2, 3, 3, 3, 3,
1652 4, 4, 4, 4, 5, 5, 5, 5 });
1653
1654 std::vector<float> input1({
1655 1, 1, 1, 1, 2, 2, 2, 2,
1656 4, 4, 4, 4, 4, 4, 4, 4 });
1657
1658 std::vector<float> output({
1659 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1660 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1661
David Beck5cd01f32018-09-12 16:00:08 +01001662
Sadik Armagan2999a022019-04-09 14:20:12 +01001663 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1664 memoryManager,
1665 shape, input0, 1.0f, 0,
1666 shape, input1, 1.0f, 0,
1667 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001668}
1669
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001670LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
1671 armnn::IWorkloadFactory& workloadFactory,
1672 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001673{
1674 unsigned int shape0[] = { 1, 2, 2, 2 };
1675 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1676
1677 unsigned int shape1[] = { 1, 1, 1, 1 };
1678 std::vector<float> input1({ 2 });
1679
1680 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1681
David Beck5cd01f32018-09-12 16:00:08 +01001682
Sadik Armagan2999a022019-04-09 14:20:12 +01001683 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1684 memoryManager,
1685 shape0, input0, 1.0f, 0,
1686 shape1, input1, 1.0f, 0,
1687 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001688}
1689
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001690LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
1691 armnn::IWorkloadFactory& workloadFactory,
1692 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001693{
1694 unsigned int shape0[] = { 1, 3, 3, 2 };
1695 std::vector<float> input0({
1696 1, 4, 3, 8, 5, 12,
1697 7, 16, 9, 20, 11, 24,
1698 13, 28, 15, 32, 17, 36});
1699
1700 unsigned int shape1[] = { 1, 1, 1, 2 };
1701 std::vector<float> input1({ 1, 2 });
1702
1703 std::vector<float> output({
1704 1, 2, 3, 4, 5, 6,
1705 7, 8, 9, 10, 11, 12,
1706 13, 14, 15, 16, 17, 18});
1707
Sadik Armagan2999a022019-04-09 14:20:12 +01001708 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1709 memoryManager,
1710 shape0, input0, 1.0f, 0,
1711 shape1, input1, 1.0f, 0,
1712 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001713}
1714
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001715LayerTestResult<uint8_t,4> DivisionUint8Test(
1716 armnn::IWorkloadFactory& workloadFactory,
1717 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001718{
1719 const unsigned int width = 2;
1720 const unsigned int height = 2;
1721 const unsigned int channelCount = 2;
1722 const unsigned int batchSize = 2;
1723
1724 unsigned int shape[] = { batchSize, channelCount, height, width };
1725
1726 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1727 4, 4, 4, 4, 5, 5, 5, 5 });
1728
1729 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1730 4, 4, 4, 4, 4, 4, 4, 4 });
1731
1732 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1733 4, 4, 4, 4, 5, 5, 5, 5});
1734
1735
Sadik Armagan2999a022019-04-09 14:20:12 +01001736 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
1737 memoryManager,
1738 shape, input0, 1.0f, 0,
1739 shape, input1, 1.0f, 0,
1740 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001741}
1742
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001743LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
1744 armnn::IWorkloadFactory& workloadFactory,
1745 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001746{
1747 unsigned int shape0[] = { 1, 2, 2, 2 };
1748 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1749
1750 unsigned int shape1[] = { 1, 1, 1, 1 };
1751 std::vector<uint8_t> input1({ 2 });
1752
1753 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1754
Sadik Armagan2999a022019-04-09 14:20:12 +01001755 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
1756 memoryManager,
1757 shape0, input0, 1.0f, 0,
1758 shape1, input1, 1.0f, 0,
1759 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001760}
1761
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001762LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
1763 armnn::IWorkloadFactory& workloadFactory,
1764 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001765{
1766 unsigned int shape0[] = { 1, 3, 3, 2 };
1767 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1768 7, 16, 9, 20, 11, 24,
1769 13, 28, 15, 32, 17, 36});
1770
1771 unsigned int shape1[] = { 1, 1, 1, 2 };
1772 std::vector<uint8_t> input1({ 1, 2 });
1773
1774 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1775 7, 8, 9, 10, 11, 12,
1776 13, 14, 15, 16, 17, 18});
1777
Sadik Armagan2999a022019-04-09 14:20:12 +01001778 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
1779 memoryManager,
1780 shape0, input0, 1.0f, 0,
1781 shape1, input1, 1.0f, 0,
1782 shape0, output, 1.0f, 0);
1783}
1784
1785LayerTestResult<int16_t,4> DivisionInt16Test(
1786 armnn::IWorkloadFactory& workloadFactory,
1787 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1788{
1789 unsigned int shape[] = { 2, 2, 2, 2 };
1790
1791 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1792 4, 4, 4, 4, 5, 5, 5, 5 });
1793
1794 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1795 4, 4, 4, 4, 4, 4, 4, 4 });
1796
1797 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1798 4, 4, 4, 4, 5, 5, 5, 5});
1799
1800
1801 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
1802 memoryManager,
1803 shape, input0, 1.0f, 0,
1804 shape, input1, 1.0f, 0,
1805 shape, output, 0.25f, 0);
1806}
1807
1808LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
1809 armnn::IWorkloadFactory& workloadFactory,
1810 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1811{
1812 unsigned int shape0[] = { 1, 2, 2, 2 };
1813 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1814
1815 unsigned int shape1[] = { 1, 1, 1, 1 };
1816 std::vector<int16_t> input1({ 2 });
1817
1818 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1819
1820 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
1821 memoryManager,
1822 shape0, input0, 1.0f, 0,
1823 shape1, input1, 1.0f, 0,
1824 shape0, output, 1.0f, 0);
1825}
1826
1827LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
1828 armnn::IWorkloadFactory& workloadFactory,
1829 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1830{
1831 unsigned int shape0[] = { 1, 3, 3, 2 };
1832 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
1833 7, 16, 9, 20, 11, 24,
1834 13, 28, 15, 32, 17, 36});
1835
1836 unsigned int shape1[] = { 1, 1, 1, 2 };
1837 std::vector<int16_t> input1({ 1, 2 });
1838
1839 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
1840 7, 8, 9, 10, 11, 12,
1841 13, 14, 15, 16, 17, 18});
1842
1843 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
1844 memoryManager,
1845 shape0, input0, 1.0f, 0,
1846 shape1, input1, 1.0f, 0,
1847 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001848}
1849
Éanna Ó Catháinde705582018-12-03 13:04:22 +00001850template<typename DescriptorType>
1851std::unique_ptr<armnn::IWorkload> CreateWorkload(
1852 const armnn::IWorkloadFactory& workloadFactory,
1853 const armnn::WorkloadInfo& info,
1854 const DescriptorType& descriptor)
1855{
1856 return CreateWorkload(workloadFactory, info, descriptor);
1857};
1858
1859template<>
1860std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
1861 const armnn::IWorkloadFactory& workloadFactory,
1862 const armnn::WorkloadInfo& info,
1863 const armnn::MaximumQueueDescriptor& descriptor)
1864{
1865 return workloadFactory.CreateMaximum(descriptor, info);
1866}
1867
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00001868template<>
1869std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
1870 const armnn::IWorkloadFactory& workloadFactory,
1871 const armnn::WorkloadInfo& info,
1872 const armnn::MinimumQueueDescriptor& descriptor)
1873{
1874 return workloadFactory.CreateMinimum(descriptor, info);
1875}
1876
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001877template<>
1878std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
1879 const armnn::IWorkloadFactory& workloadFactory,
1880 const armnn::WorkloadInfo& info,
1881 const armnn::EqualQueueDescriptor& descriptor)
1882{
1883 return workloadFactory.CreateEqual(descriptor, info);
1884}
1885
FrancisMurtagh878f0232018-12-19 10:56:15 +00001886template<>
1887std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
1888 const armnn::IWorkloadFactory& workloadFactory,
1889 const armnn::WorkloadInfo& info,
1890 const armnn::GreaterQueueDescriptor& descriptor)
1891{
1892 return workloadFactory.CreateGreater(descriptor, info);
1893}
1894
namespace {

// Shared driver for binary elementwise tests (Maximum, Minimum, Equal,
// Greater, ...). Input and output element types may differ: the comparison
// ops take e.g. Float32 or QAsymm8 inputs and produce Boolean (uint8) output.
// The workload itself is created via the CreateWorkload<Descriptor>
// specialisations above.
//
// qScale/qOffset are applied to all three tensor infos, but only when TInput
// is a quantized type.
template <typename Descriptor,
          armnn::DataType ArmnnTypeInput,
          armnn::DataType ArmnnTypeOutput,
          typename TInput = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<TInput> values0,
    const unsigned int shape1[4], std::vector<TInput> values1,
    const unsigned int outShape[4], std::vector<TOutput> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};

    auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);

    // Quantization parameters are only meaningful for quantized input types.
    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo0.SetQuantizationScale(qScale);
        inputTensorInfo0.SetQuantizationOffset(qOffset);

        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);

        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<TOutput,4> ret(outputTensorInfo);

    // Boolean results are flagged so the caller compares them as booleans.
    if(ArmnnTypeOutput == armnn::DataType::Boolean)
    {
        ret.compareBoolean = true;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    // Dispatches to the CreateWorkload<Descriptor> specialisation.
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    // Allocate before copying any data in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // NOTE(review): ExecuteWorkload comes from WorkloadTestUtils and also
    // receives the memory manager — presumably to drive backend memory
    // acquire/release around Execute(); confirm against that helper.
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
    return ret;
}

// Convenience overload for ops whose input and output element types are the
// same; forwards to the two-type helper with ArmnnT used for both.
template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
LayerTestResult<T, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<T> values0,
    const unsigned int shape1[4], std::vector<T> values1,
    const unsigned int outShape[4], std::vector<T> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
        (workloadFactory,
         memoryManager,
         shape0,
         values0,
         shape1,
         values1,
         outShape,
         outValues,
         qScale,
         qOffset);
}
} // anonymous namespace
1985
1986LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
1987 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001988{
1989 const unsigned int width = 2;
1990 const unsigned int height = 2;
1991 const unsigned int channelCount = 2;
1992 const unsigned int batchSize = 2;
1993
1994 unsigned int shape[] = { batchSize, channelCount, height, width };
1995
1996 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
1997 3, 3, 3, 3, 4, 4, 4, 4 });
1998
1999 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2000 5, 5, 5, 5, 4, 4, 4, 4 });
2001
kevmay012b4d88e2019-01-24 14:05:09 +00002002 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
2003 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002004
kevmay012b4d88e2019-01-24 14:05:09 +00002005 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002006 workloadFactory,
2007 memoryManager,
2008 shape,
2009 input0,
2010 shape,
2011 input1,
2012 shape,
2013 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002014}
2015
kevmay012b4d88e2019-01-24 14:05:09 +00002016LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002017 armnn::IWorkloadFactory& workloadFactory,
2018 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2019{
2020 unsigned int shape0[] = { 1, 2, 2, 2 };
2021 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2022
2023 unsigned int shape1[] = { 1, 1, 1, 1 };
2024 std::vector<float> input1({ 1 });
2025
kevmay012b4d88e2019-01-24 14:05:09 +00002026 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002027
kevmay012b4d88e2019-01-24 14:05:09 +00002028 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002029 workloadFactory,
2030 memoryManager,
2031 shape0,
2032 input0,
2033 shape1,
2034 input1,
2035 shape0,
2036 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002037}
2038
kevmay012b4d88e2019-01-24 14:05:09 +00002039LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002040 armnn::IWorkloadFactory& workloadFactory,
2041 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2042{
2043 const unsigned int shape0[] = { 1, 2, 2, 3 };
2044 const unsigned int shape1[] = { 1, 1, 1, 3 };
2045
2046 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2047 7, 8, 9, 10, 11, 12 });
2048
2049 std::vector<float> input1({ 1, 2, 3});
2050
kevmay012b4d88e2019-01-24 14:05:09 +00002051 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
2052 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002053
kevmay012b4d88e2019-01-24 14:05:09 +00002054 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002055 workloadFactory,
2056 memoryManager,
2057 shape0,
2058 input0,
2059 shape1,
2060 input1,
2061 shape0,
2062 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002063}
2064
2065LayerTestResult<uint8_t, 4> EqualUint8Test(
2066 armnn::IWorkloadFactory& workloadFactory,
2067 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2068{
2069 unsigned int shape[] = { 2, 2, 2, 2 };
2070
2071 // See dequantized values to the right.
2072 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002073 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002074
2075 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2076 3, 3, 3, 3, 5, 5, 5, 5 });
2077
2078 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2079 1, 1, 1, 1, 0, 0, 0, 0 });
2080
kevmay012b4d88e2019-01-24 14:05:09 +00002081 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2082 armnn::DataType::QuantisedAsymm8,
2083 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002084 workloadFactory,
2085 memoryManager,
2086 shape,
2087 input0,
2088 shape,
2089 input1,
2090 shape,
2091 output,
2092 1.0f,
2093 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002094}
2095
2096LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
2097 armnn::IWorkloadFactory& workloadFactory,
2098 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2099{
2100 const unsigned int shape0[] = { 1, 2, 2, 3 };
2101 const unsigned int shape1[] = { 1, 1, 1, 1 };
2102
2103 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2104 7, 8, 9, 10, 11, 12 });
2105
2106 std::vector<uint8_t> input1({ 1 });
2107
2108 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
2109 0, 0, 0, 0, 0, 0 });
2110
kevmay012b4d88e2019-01-24 14:05:09 +00002111 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2112 armnn::DataType::QuantisedAsymm8,
2113 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002114 workloadFactory,
2115 memoryManager,
2116 shape0,
2117 input0,
2118 shape1,
2119 input1,
2120 shape0,
2121 output,
2122 1.0f,
2123 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002124}
2125
2126LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
2127 armnn::IWorkloadFactory& workloadFactory,
2128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2129{
2130 const unsigned int shape0[] = { 1, 2, 2, 3 };
2131 const unsigned int shape1[] = { 1, 1, 1, 3 };
2132
2133 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2134 7, 8, 9, 10, 11, 12 });
2135
2136 std::vector<uint8_t> input1({ 1, 1, 3});
2137
2138 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
2139 0, 0, 0, 0, 0, 0 });
2140
kevmay012b4d88e2019-01-24 14:05:09 +00002141 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2142 armnn::DataType::QuantisedAsymm8,
2143 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002144 workloadFactory,
2145 memoryManager,
2146 shape0,
2147 input0,
2148 shape1,
2149 input1,
2150 shape0,
2151 output,
2152 1.0f,
2153 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002154}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002155
kevmay012b4d88e2019-01-24 14:05:09 +00002156LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00002157 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2158{
2159 const unsigned int width = 2;
2160 const unsigned int height = 2;
2161 const unsigned int channelCount = 2;
2162 const unsigned int batchSize = 2;
2163
2164 unsigned int shape[] = { batchSize, channelCount, height, width };
2165
2166 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2167 3, 3, 3, 3, 4, 4, 4, 4 });
2168
2169 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2170 5, 5, 5, 5, 4, 4, 4, 4 });
2171
kevmay012b4d88e2019-01-24 14:05:09 +00002172 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2173 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002174
kevmay012b4d88e2019-01-24 14:05:09 +00002175 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002176 workloadFactory,
2177 memoryManager,
2178 shape,
2179 input0,
2180 shape,
2181 input1,
2182 shape,
2183 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002184}
2185
kevmay012b4d88e2019-01-24 14:05:09 +00002186LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002187 armnn::IWorkloadFactory& workloadFactory,
2188 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2189{
2190 unsigned int shape0[] = { 1, 2, 2, 2 };
2191 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2192
2193 unsigned int shape1[] = { 1, 1, 1, 1 };
2194 std::vector<float> input1({ 1 });
2195
kevmay012b4d88e2019-01-24 14:05:09 +00002196 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00002197
kevmay012b4d88e2019-01-24 14:05:09 +00002198 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002199 workloadFactory,
2200 memoryManager,
2201 shape0,
2202 input0,
2203 shape1,
2204 input1,
2205 shape0,
2206 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002207}
2208
kevmay012b4d88e2019-01-24 14:05:09 +00002209LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002210 armnn::IWorkloadFactory& workloadFactory,
2211 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2212{
2213 const unsigned int shape0[] = { 1, 2, 2, 3 };
2214 const unsigned int shape1[] = { 1, 1, 1, 3 };
2215
2216 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
2217 7, 8, 9, 10, 11, 12 });
2218
2219 std::vector<float> input1({ 1, 3, 2});
2220
kevmay012b4d88e2019-01-24 14:05:09 +00002221 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
2222 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002223
kevmay012b4d88e2019-01-24 14:05:09 +00002224 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002225 workloadFactory,
2226 memoryManager,
2227 shape0,
2228 input0,
2229 shape1,
2230 input1,
2231 shape0,
2232 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002233}
2234
2235LayerTestResult<uint8_t, 4> GreaterUint8Test(
2236 armnn::IWorkloadFactory& workloadFactory,
2237 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2238{
2239 unsigned int shape[] = { 2, 2, 2, 2 };
2240
2241 // See dequantized values to the right.
2242 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2243 3, 3, 3, 3, 5, 5, 5, 5 });
2244
2245 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2246 2, 2, 2, 2, 5, 5, 5, 5 });
2247
2248 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
2249 1, 1, 1, 1, 0, 0, 0, 0 });
2250
kevmay012b4d88e2019-01-24 14:05:09 +00002251 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2252 armnn::DataType::QuantisedAsymm8,
2253 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002254 workloadFactory,
2255 memoryManager,
2256 shape,
2257 input0,
2258 shape,
2259 input1,
2260 shape,
2261 output,
2262 1.0f,
2263 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002264}
2265
2266LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
2267 armnn::IWorkloadFactory& workloadFactory,
2268 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2269{
2270 const unsigned int shape0[] = { 1, 2, 2, 3 };
2271 const unsigned int shape1[] = { 1, 1, 1, 1 };
2272
2273 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2274 7, 8, 9, 10, 11, 12 });
2275
2276 std::vector<uint8_t> input1({ 1 });
2277
2278 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
2279 1, 1, 1, 1, 1, 1 });
2280
kevmay012b4d88e2019-01-24 14:05:09 +00002281 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2282 armnn::DataType::QuantisedAsymm8,
2283 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002284 workloadFactory,
2285 memoryManager,
2286 shape0,
2287 input0,
2288 shape1,
2289 input1,
2290 shape0,
2291 output,
2292 1.0f,
2293 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002294}
2295
2296LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
2297 armnn::IWorkloadFactory& workloadFactory,
2298 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2299{
2300 const unsigned int shape0[] = { 1, 2, 2, 3 };
2301 const unsigned int shape1[] = { 1, 1, 1, 3 };
2302
2303 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2304 7, 8, 9, 10, 11, 12 });
2305
2306 std::vector<uint8_t> input1({ 1, 1, 3});
2307
2308 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
2309 1, 1, 1, 1, 1, 1 });
2310
kevmay012b4d88e2019-01-24 14:05:09 +00002311 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2312 armnn::DataType::QuantisedAsymm8,
2313 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002314 workloadFactory,
2315 memoryManager,
2316 shape0,
2317 input0,
2318 shape1,
2319 input1,
2320 shape0,
2321 output,
2322 1.0f,
2323 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002324}
2325
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002326LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2327 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2328{
2329 const unsigned int width = 2;
2330 const unsigned int height = 2;
2331 const unsigned int channelCount = 2;
2332 const unsigned int batchSize = 2;
2333
2334 unsigned int shape[] = { batchSize, channelCount, height, width };
2335
2336 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2337 3, 3, 3, 3, 4, 4, 4, 4 });
2338
2339 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2340 4, 4, 4, 4, 5, 5, 5, 5 });
2341
2342 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
2343 4, 4, 4, 4, 5, 5, 5, 5 });
2344
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002345 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2346 workloadFactory,
2347 memoryManager,
2348 shape,
2349 input0,
2350 shape,
2351 input1,
2352 shape,
2353 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002354}
2355
2356LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
2357 armnn::IWorkloadFactory& workloadFactory,
2358 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2359{
2360 unsigned int shape0[] = { 1, 2, 2, 2 };
2361 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2362
2363 unsigned int shape1[] = { 1, 1, 1, 1 };
2364 std::vector<float> input1({ 2 });
2365
2366 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
2367
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002368 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2369 workloadFactory,
2370 memoryManager,
2371 shape0,
2372 input0,
2373 shape1,
2374 input1,
2375 shape0,
2376 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002377}
2378
2379LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
2380 armnn::IWorkloadFactory& workloadFactory,
2381 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2382{
2383 const unsigned int shape0[] = { 1, 2, 2, 3 };
2384 const unsigned int shape1[] = { 1, 1, 1, 3 };
2385
2386 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2387 7, 8, 9, 10, 11, 12 });
2388
2389 std::vector<float> input1({ 1, 2, 3});
2390
2391 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002392 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002393
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002394 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2395 workloadFactory,
2396 memoryManager,
2397 shape0,
2398 input0,
2399 shape1,
2400 input1,
2401 shape0,
2402 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002403}
2404
2405LayerTestResult<uint8_t, 4> MaximumUint8Test(
2406 armnn::IWorkloadFactory& workloadFactory,
2407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2408{
2409 unsigned int shape[] = { 2, 2, 2, 2 };
2410
2411 // See dequantized values to the right.
2412 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2413 3, 3, 3, 3, 4, 4, 4, 4 });
2414
2415 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2416 4, 4, 4, 4, 5, 5, 5, 5 });
2417
2418 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2419 4, 4, 4, 4, 5, 5, 5, 5 });
2420
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002421 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2422 workloadFactory,
2423 memoryManager,
2424 shape,
2425 input0,
2426 shape,
2427 input1,
2428 shape,
2429 output,
2430 1.0f,
2431 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002432}
2433
2434LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
2435 armnn::IWorkloadFactory& workloadFactory,
2436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2437{
2438 const unsigned int shape0[] = { 1, 2, 2, 3 };
2439 const unsigned int shape1[] = { 1, 1, 1, 1 };
2440
2441 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2442 7, 8, 9, 10, 11, 12 });
2443
2444 std::vector<uint8_t> input1({2});
2445
2446 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
2447 7, 8, 9, 10, 11, 12 });
2448
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002449 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2450 workloadFactory,
2451 memoryManager,
2452 shape0,
2453 input0,
2454 shape1,
2455 input1,
2456 shape0,
2457 output,
2458 1.0f,
2459 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002460}
2461
2462LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
2463 armnn::IWorkloadFactory& workloadFactory,
2464 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2465{
2466 const unsigned int shape0[] = { 1, 2, 2, 3 };
2467 const unsigned int shape1[] = { 1, 1, 1, 3 };
2468
2469 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2470 7, 8, 9, 10, 11, 12 });
2471
2472 std::vector<uint8_t> input1({ 1, 10, 3});
2473
2474 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
2475 7, 10, 9, 10, 11, 12 });
2476
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002477 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2478 workloadFactory,
2479 memoryManager,
2480 shape0,
2481 input0,
2482 shape1,
2483 input1,
2484 shape0,
2485 output,
2486 1.0f,
2487 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002488}
2489
Sadik Armagan2999a022019-04-09 14:20:12 +01002490LayerTestResult<int16_t, 4> MaximumInt16Test(
2491 armnn::IWorkloadFactory& workloadFactory,
2492 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2493{
2494 unsigned int shape[] = { 2, 2, 2, 2 };
2495
2496 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2497 3, 3, 3, 3, 4, 4, 4, 4 });
2498
2499 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2500 4, 4, 4, 4, 5, 5, 5, 5 });
2501
2502 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2503 4, 4, 4, 4, 5, 5, 5, 5 });
2504
2505 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2506 workloadFactory,
2507 memoryManager,
2508 shape,
2509 input0,
2510 shape,
2511 input1,
2512 shape,
2513 output,
2514 1.0f,
2515 0);
2516}
2517
2518LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
2519 armnn::IWorkloadFactory& workloadFactory,
2520 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2521{
2522 const unsigned int shape0[] = { 1, 2, 2, 3 };
2523 const unsigned int shape1[] = { 1, 1, 1, 1 };
2524
2525 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2526 7, 8, 9, 10, 11, 12 });
2527
2528 std::vector<int16_t> input1({2});
2529
2530 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
2531 7, 8, 9, 10, 11, 12 });
2532
2533 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2534 workloadFactory,
2535 memoryManager,
2536 shape0,
2537 input0,
2538 shape1,
2539 input1,
2540 shape0,
2541 output,
2542 1.0f,
2543 0);
2544}
2545
2546LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
2547 armnn::IWorkloadFactory& workloadFactory,
2548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2549{
2550 const unsigned int shape0[] = { 1, 2, 2, 3 };
2551 const unsigned int shape1[] = { 1, 1, 1, 3 };
2552
2553 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2554 7, 8, 9, 10, 11, 12 });
2555
2556 std::vector<int16_t> input1({ 1, 10, 3});
2557
2558 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
2559 7, 10, 9, 10, 11, 12 });
2560
2561 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2562 workloadFactory,
2563 memoryManager,
2564 shape0,
2565 input0,
2566 shape1,
2567 input1,
2568 shape0,
2569 output,
2570 1.0f,
2571 0);
2572}
2573
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002574LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
2575 armnn::IWorkloadFactory& workloadFactory,
2576 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2577{
2578 unsigned int shape0[] = { 1, 2, 2, 2 };
2579 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2580
2581 unsigned int shape1[] = { 1, 1, 1, 1 };
2582 std::vector<float> input1({ 2 });
2583
2584 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
2585
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002586 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2587 workloadFactory,
2588 memoryManager,
2589 shape0,
2590 input0,
2591 shape1,
2592 input1,
2593 shape0,
2594 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002595}
2596
2597
2598LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
2599 armnn::IWorkloadFactory& workloadFactory,
2600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2601{
2602 unsigned int shape0[] = { 1, 2, 2, 2 };
2603 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
2604
2605 unsigned int shape1[] = { 1, 1, 1, 1 };
2606 std::vector<float> input1({ 5 });
2607
2608 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
2609
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002610 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2611 workloadFactory,
2612 memoryManager,
2613 shape0,
2614 input0,
2615 shape1,
2616 input1,
2617 shape0,
2618 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002619}
2620
2621LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
2622 armnn::IWorkloadFactory & workloadFactory,
2623 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
2624{
2625 const unsigned int shape0[] = { 1, 2, 2, 3 };
2626 const unsigned int shape1[] = { 1, 1, 1, 3 };
2627
2628 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
2629 7, 1, 2, 3, 4, 5 });
2630
2631 std::vector<uint8_t> input1({ 1, 2, 3});
2632
2633 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
2634 1, 1, 2, 1, 2, 3 });
2635
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002636 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2637 workloadFactory,
2638 memoryManager,
2639 shape0,
2640 input0,
2641 shape1,
2642 input1,
2643 shape0,
2644 output,
2645 1.0f,
2646 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002647}
2648
Sadik Armagan2999a022019-04-09 14:20:12 +01002649LayerTestResult<int16_t, 4> MinimumInt16Test(
2650 armnn::IWorkloadFactory& workloadFactory,
2651 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2652{
2653 unsigned int shape[] = { 2, 2, 2, 2 };
2654
2655 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2656 3, 3, 3, 3, 4, 4, 4, 4 });
2657
2658 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2659 4, 4, 4, 4, 5, 5, 5, 5 });
2660
2661 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
2662 3, 3, 3, 3, 4, 4, 4, 4 });
2663
2664 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2665 workloadFactory,
2666 memoryManager,
2667 shape,
2668 input0,
2669 shape,
2670 input1,
2671 shape,
2672 output,
2673 1.0f,
2674 0);
2675}
2676
2677LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
2678 armnn::IWorkloadFactory& workloadFactory,
2679 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2680{
2681 const unsigned int shape0[] = { 1, 2, 2, 3 };
2682 const unsigned int shape1[] = { 1, 1, 1, 1 };
2683
2684 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2685 7, 8, 9, 10, 11, 12 });
2686
2687 std::vector<int16_t> input1({2});
2688
2689 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
2690 2, 2, 2, 2, 2, 2 });
2691
2692 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2693 workloadFactory,
2694 memoryManager,
2695 shape0,
2696 input0,
2697 shape1,
2698 input1,
2699 shape0,
2700 output,
2701 1.0f,
2702 0);
2703}
2704
2705LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
2706 armnn::IWorkloadFactory& workloadFactory,
2707 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2708{
2709 const unsigned int shape0[] = { 1, 2, 2, 3 };
2710 const unsigned int shape1[] = { 1, 1, 1, 3 };
2711
2712 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2713 7, 8, 9, 10, 11, 12 });
2714
2715 std::vector<int16_t> input1({ 1, 10, 3});
2716
2717 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
2718 1, 8, 3, 1, 10, 3 });
2719
2720 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2721 workloadFactory,
2722 memoryManager,
2723 shape0,
2724 input0,
2725 shape1,
2726 input1,
2727 shape0,
2728 output,
2729 1.0f,
2730 0);
2731}
2732
namespace {
// Runs a Float32 Multiplication workload on the given factory and returns
// both the actual output and the caller-supplied expected output so the
// test framework can compare them.
//
// shape0/values0 and shape1/values1 describe the two 4-D inputs (broadcast
// shapes allowed); outShape/outValues describe the expected result.
// NOTE(review): memoryManager is accepted but not referenced in this body —
// presumably kept for signature parity with sibling helpers; confirm.
LayerTestResult<float,4> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<float> & values0,
    const unsigned int shape1[4],
    const std::vector<float> & values1,
    const unsigned int outShape[4],
    const std::vector<float> & outValues)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Backend-specific tensor handles for the two inputs and the output.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the workload: two inputs, one output.
    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The workload must be created before the handles are allocated.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    // Read back the computed result and attach the expected values.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace
2781
2782
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002783LayerTestResult<float,4> MultiplicationTest(
2784 armnn::IWorkloadFactory& workloadFactory,
2785 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002786{
2787 const unsigned int width = 2;
2788 const unsigned int height = 2;
2789 const unsigned int channelCount = 2;
2790 const unsigned int batchSize = 2;
2791
2792 unsigned int shape[] = { batchSize, channelCount, height, width };
2793
2794 std::vector<float> input0({
2795 1, 1, 1, 1, 2, 2, 2, 2,
2796 3, 3, 3, 3, 4, 4, 4, 4 });
2797
2798 std::vector<float> input1({
2799 2, 2, 2, 2, 3, 3, 3, 3,
2800 4, 4, 4, 4, 5, 5, 5, 5 });
2801
2802 std::vector<float> output({
2803 2, 2, 2, 2, 6, 6, 6, 6,
2804 12, 12, 12, 12, 20, 20, 20, 20 });
2805
2806 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002807 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002808 shape,
2809 input0,
2810 shape,
2811 input1,
2812 shape,
2813 output);
2814}
2815
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002816LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
2817 armnn::IWorkloadFactory& workloadFactory,
2818 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002819{
2820 unsigned int shape0[] = { 1, 2, 2, 2 };
2821 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2822
2823 unsigned int shape1[] = { 1, 1, 1, 1 };
2824 std::vector<float> input1({ 2 });
2825
2826 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
2827
2828 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002829 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002830 shape0,
2831 input0,
2832 shape1,
2833 input1,
2834 shape0,
2835 output);
2836}
2837
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002838LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
2839 armnn::IWorkloadFactory& workloadFactory,
2840 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002841{
2842 unsigned int shape0[] = { 1, 3, 3, 2 };
2843 std::vector<float> input0({
2844 1, 2, 3, 4, 5, 6,
2845 7, 8, 9, 10, 11, 12,
2846 13, 14, 15, 16, 17, 18});
2847
2848 unsigned int shape1[] = { 1, 1, 1, 2 };
2849 std::vector<float> input1({ 1, 2 });
2850
2851 std::vector<float> output({
2852 1, 4, 3, 8, 5, 12,
2853 7, 16, 9, 20, 11, 24,
2854 13, 28, 15, 32, 17, 36});
2855
2856 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002857 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002858 shape0,
2859 input0,
2860 shape1,
2861 input1,
2862 shape0,
2863 output);
2864}
telsoa014fcda012018-03-09 14:13:49 +00002865
// Runs the same random Multiplication workload on two factories (the
// backend under test and a reference backend) and returns both results
// so the caller can compare them element-wise.
// NOTE(review): memoryManager is accepted but not referenced in this body —
// presumably kept for signature parity with sibling tests; confirm.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the random inputs reproducible across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    // Handles for the backend under test...
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // ...and matching handles for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Clone the descriptor for the reference run, rebinding its handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    // Workloads must be created before the handles are allocated.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    // Backend-under-test result goes to .output; reference result to
    // .outputExpected, so the framework's comparison checks them directly.
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
2934
// Runs the same BatchNormalization workload on 'workloadFactory' (the backend
// under test) and on 'refWorkloadFactory' (typically the reference backend)
// with identical random inputs and parameters, and returns both outputs in a
// LayerTestResult so the caller can compare the two implementations.
// NOTE(review): memoryManager appears unused in this snapshot — confirm
// whether the backends fetch it elsewhere.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    // NCHW input: 3 batches x 5 channels x 3 rows x 2 columns.
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;    // shape of the per-channel parameters

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Random data with fixed seeds keeps the comparison deterministic.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    // The extra 0.0f argument for variance presumably sets a lower bound so
    // variance stays non-negative — TODO confirm MakeRandomTensor semantics.
    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Separate handles per factory: each backend owns its own tensor memory.
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference workload shares the parameters/constant tensors but is
    // rebound to the reference factory's own input/output handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both workloads consume identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    // 'output' = backend under test, 'outputExpected' = reference backend.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
3015
// Runs a Permute workload on 'workloadFactory' that reorders 'inputData'
// according to 'mappings'. On return, 'outputData' holds the permuted
// elements and 'inputTensorInfo' is overwritten with the permuted tensor
// info — note the in-out parameter, which is what lets callers chain
// permutations. NOTE(review): memoryManager appears unused in this snapshot.
template<typename T>
void PermuteTensorData(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::PermutationVector& mappings,
    armnn::TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    // Shape/layout the data will have after the permutation.
    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the Permute workload. Handles must be registered with the
    // workload info before the workload is created.
    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Report the permuted tensor info back through the in-out parameter.
    inputTensorInfo = outputTensorInfo;
}
3058
3059armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
3060 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3061 unsigned int concatDim)
3062{
telsoa014fcda012018-03-09 14:13:49 +00003063 std::vector<armnn::TensorShape> shapes;
3064 shapes.reserve(inputTensorInfos.size());
3065 for (const armnn::TensorInfo& it: inputTensorInfos)
3066 {
3067 shapes.push_back(it.GetShape());
3068 }
surmeh013537c2c2018-05-18 16:31:43 +01003069
3070 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
3071 shapes.end(),
3072 concatDim);
3073}
3074
3075//
// Concatenation is only supported for the N and C dimensions for NCHW, and for the innermost dimension.
// In the case of fewer than 4 dimensions we need to make sure that the concat dimension is at least
// the third slowest-iterating one, or the innermost dimension.
surmeh013537c2c2018-05-18 16:31:43 +01003079//
3080
3081bool NeedPermuteForConcat(
3082 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3083 unsigned int concatDim)
3084{
3085 // See note above. Additionally we expect the input shapes to have the
3086 // same number of dimensions.
3087 unsigned int nDimensions = 0;
3088
telsoa01c577f2c2018-08-31 09:22:23 +01003089 // Determine the number of dimensions as well as sanity check them
3090 // agains test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01003091 for (auto && tensorInfo : inputTensorInfos)
3092 {
3093 if (!nDimensions)
3094 {
3095 nDimensions = tensorInfo.GetShape().GetNumDimensions();
3096 }
3097 else
3098 {
3099 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
3100 "Input shapes must have the same number of dimensions");
3101 }
3102 }
3103
narpra015cdda352018-11-19 15:30:27 +00003104 return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
surmeh013537c2c2018-05-18 16:31:43 +01003105}
3106
3107armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
3108{
3109 unsigned int numDims = inputShape.GetNumDimensions();
3110 if (numDims >= 3)
3111 {
3112 // Nothing to do if the inputShape has at least 3 dimensions.
3113 return inputShape;
3114 }
3115
3116 std::vector<unsigned int> newDims(size_t(3), 1u);
3117 unsigned int expandedBy = 3 - numDims;
3118 for (unsigned int i=0; i<numDims; ++i)
3119 {
3120 newDims[expandedBy+i] = inputShape[i];
3121 }
3122 return armnn::TensorShape(3u, &newDims[0]);
3123}
3124
3125void Generate3dPermuteVectorForConcat(
3126 unsigned int numDimensions,
3127 unsigned int & concatDim,
3128 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
3129{
3130 BOOST_ASSERT_MSG(numDimensions <= 3,
3131 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01003132 unsigned int expandedBy = 3 - numDimensions;
3133 unsigned int expandedConcatAxis = concatDim + expandedBy;
3134
3135 if (expandedConcatAxis == 2)
3136 {
3137 concatDim = 0;
3138 armnn::PermutationVector forwardPermutation({1, 2, 0});
3139 armnn::PermutationVector reversePermutation({2, 0, 1});
3140 permutations = std::make_pair(forwardPermutation, reversePermutation);
3141 }
3142 else if (expandedConcatAxis == 1)
3143 {
3144 concatDim = 0;
3145 armnn::PermutationVector forwardPermutation({2, 0, 1});
3146 armnn::PermutationVector reversePermutation({1, 2, 0});
3147 permutations = std::make_pair(forwardPermutation, reversePermutation);
3148 }
3149 else
3150 {
3151 BOOST_ASSERT(expandedConcatAxis == 0);
3152 concatDim = 0;
3153 }
3154}
3155
3156//
3157// Permute the input tensors so we can do a supported concatenation.
3158// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
3159// at the front. Finally this function tells what the output shape
3160// of the permuted concatenated tensor is going to be.
3161//
// Permutes every input so the concatenation can run along a supported axis
// (see PermuteInputsForConcat's pairing with PermuteOutputForConcat). Mutates
// its in-out parameters: inputTensorInfos/inputData are replaced with their
// permuted versions (backed by inputDataStorage), concatDim becomes the new
// axis, permuteVector receives the REVERSE permutation for undoing the
// transform later, and outputTensorInfo gets the permuted output shape.
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    // One storage vector per input keeps the permuted copies alive for the
    // caller.
    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input: derive the permutation pair from its rank. All
            // later inputs must match this rank.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Pad to 3d, then permute the data into the supported layout.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        // Repoint the caller's data/info at the permuted copies.
        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
3224
3225
3226//
3227// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01003228// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01003229// output.
3230//
3231template <typename T>
3232void PermuteOutputForConcat(
3233 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003234 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003235 const armnn::TensorInfo & tensorInfo,
3236 const armnn::PermutationVector & permuteVector,
3237 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
3238 T * data)
3239{
3240 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
3241 if (data == nullptr)
3242 {
3243 // Nullptr is an error in the test. By returning without doing the permutation
3244 // I expect the caller to fail the test. It still makes sense to report this as
3245 // an assert for Debug builds.
3246 return;
3247 }
3248
3249 armnn::TensorInfo resultTensorInfo = tensorInfo;
3250 std::vector<T> inputData(tensorInfo.GetNumElements());
3251 std::vector<T> outputData;
3252
3253 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
3254
3255 PermuteTensorData<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003256 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003257 permuteVector,
3258 resultTensorInfo,
3259 &inputData[0],
3260 outputData);
3261
3262 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
3263}
3264
3265template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003266void Concatenate(
3267 armnn::IWorkloadFactory& workloadFactory,
3268 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3269 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
3270 std::initializer_list<T *> inputsOrig,
3271 const armnn::TensorInfo& outputTensorInfoOrig,
3272 T * output,
narpra015cdda352018-11-19 15:30:27 +00003273 unsigned int concatDim,
3274 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01003275{
3276 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
3277 if (output == nullptr)
3278 {
3279 // Nullptr is an error in the test. By returning without doing the permutation
3280 // I expect the caller to fail the test. It still makes sense to report this as
3281 // an assert for Debug builds.
3282 return;
3283 }
3284
telsoa01c577f2c2018-08-31 09:22:23 +01003285 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01003286 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
3287 std::vector<T *> inputs = inputsOrig;
3288 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
3289
3290 armnn::PermutationVector permuteVector{0, 1, 2};
3291
telsoa01c577f2c2018-08-31 09:22:23 +01003292 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01003293 std::vector<std::vector<T>> tmpInputDataStorage;
3294
3295 const size_t inputCount = inputTensorInfos.size();
3296
3297 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
3298
3299 if (needPermuteForConcat)
3300 {
3301 //
3302 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01003303 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01003304 //
3305 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003306 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003307 inputTensorInfos,
3308 inputs,
3309 tmpInputDataStorage,
3310 permuteVector,
3311 concatDim,
3312 outputTensorInfo);
3313 }
3314
narpra015cdda352018-11-19 15:30:27 +00003315 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00003316
3317 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
3318 inputHandles.reserve(inputCount);
3319
narpra015cdda352018-11-19 15:30:27 +00003320 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3321
3322 armnn::MergerQueueDescriptor queueDescriptor;
3323 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
3324 queueDescriptor.m_Parameters = viewsDescriptor;
3325
3326 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003327 {
narpra015cdda352018-11-19 15:30:27 +00003328 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
3329 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
3330 {
3331 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
3332 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
3333 }
telsoa014fcda012018-03-09 14:13:49 +00003334
narpra015cdda352018-11-19 15:30:27 +00003335 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00003336
narpra015cdda352018-11-19 15:30:27 +00003337 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
3338 for (unsigned int i = 0; i < inputCount; ++i)
3339 {
3340 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
3341 std::unique_ptr<armnn::ITensorHandle> inputHandle =
3342 subTensorsSupported ?
3343 workloadFactory.CreateSubTensorHandle(*outputHandle,
3344 inputTensorInfo.GetShape(),
3345 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
3346 workloadFactory.CreateTensorHandle(inputTensorInfo);
3347
3348 inputHandles.emplace_back(std::move(inputHandle));
3349 }
3350
telsoa014fcda012018-03-09 14:13:49 +00003351 }
narpra015cdda352018-11-19 15:30:27 +00003352 else
3353 {
3354 for (unsigned int i = 0; i < inputCount; ++i)
3355 {
3356 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
3357 inputHandles.emplace_back(std::move(inputHandle));
3358 }
3359 }
telsoa014fcda012018-03-09 14:13:49 +00003360
3361 for (unsigned int i = 0; i < inputCount; ++i)
3362 {
surmeh013537c2c2018-05-18 16:31:43 +01003363 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00003364 }
3365
3366 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
3367
3368 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
3369
3370 for (auto& inputHandle : inputHandles)
3371 {
3372 inputHandle->Allocate();
3373 }
3374
3375 outputHandle->Allocate();
3376
3377 unsigned int nextInputId = 0;
3378 for (auto& inputHandle : inputHandles)
3379 {
surmeh013537c2c2018-05-18 16:31:43 +01003380 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
3381 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00003382 }
3383
3384 workload->Execute();
3385
surmeh013537c2c2018-05-18 16:31:43 +01003386 if (needPermuteForConcat)
3387 {
3388 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003389 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003390 outputTensorInfo,
3391 permuteVector,
3392 std::move(outputHandle),
3393 output);
3394 }
3395 else
3396 {
3397 CopyDataFromITensorHandle(output, outputHandle.get());
3398 }
telsoa014fcda012018-03-09 14:13:49 +00003399}
3400
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003401template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003402LayerTestResult<T, 1> Concatenation1dTestImpl(
3403 armnn::IWorkloadFactory& workloadFactory,
3404 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3405 float qScale,
3406 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003407{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003408 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003409
3410 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
3411 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
3412 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
3413
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003414 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003415
3416 LayerTestResult<T, 1> result(outputTensorInfo);
3417
3418 std::vector<T> output;
3419 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003420 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003421 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
3422 { input0.data(), input1.data(), input2.data() },
3423 outputTensorInfo,
3424 output.data(),
3425 0,
3426 true);
telsoa014fcda012018-03-09 14:13:49 +00003427
3428 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
3429 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3430 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
3431 }));
3432
3433 return result;
3434}
3435
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003436LayerTestResult<float, 1> Concatenation1dTest(
3437 armnn::IWorkloadFactory& workloadFactory,
3438 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003440 return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003441}
3442
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003443template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003444LayerTestResult<T, 2> Concatenation2dTestImpl(
3445 armnn::IWorkloadFactory& workloadFactory,
3446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00003447 const armnn::TensorInfo& outputTensorInfo,
3448 unsigned int dimension,
3449 const float qScale,
3450 const int32_t qOffset)
3451{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003452 armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003453
3454 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3455 // Batch 0
3456 1.0f, 2.0f, 3.0f,
3457
3458 // Batch 1
3459 10.0f, 11.0f, 12.0f,
3460 }));
3461
3462 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3463 // Batch 0
3464 4.0f, 5.0f, 6.0f,
3465
3466 // Batch 1
3467 13.0f, 14.0f, 15.0f,
3468 }));
3469
3470 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3471 // Batch 0
3472 7.0f, 8.0f, 9.0f,
3473
3474 // Batch 1
3475 16.0f, 17.0f, 18.0f,
3476 }));
3477
3478 LayerTestResult<T, 2> result(outputTensorInfo);
3479
3480 std::vector<T> output;
3481 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003482 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003483 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
3484 { input0.data(), input1.data(), input2.data() },
3485 outputTensorInfo,
3486 output.data(),
3487 dimension,
3488 true);
telsoa014fcda012018-03-09 14:13:49 +00003489
3490 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
3491 return result;
3492}
3493
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003494template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003495LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
3496 armnn::IWorkloadFactory& workloadFactory,
3497 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3498 float qScale,
3499 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003500{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003501 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003502
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003503 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
3504 workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
3505
telsoa014fcda012018-03-09 14:13:49 +00003506 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3507 // Batch 0
3508 1.0f, 2.0f, 3.0f,
3509
3510 // Batch 1
3511 10.0f, 11.0f, 12.0f,
3512
3513 // Batch 2
3514 4.0f, 5.0f, 6.0f,
3515
3516 // Batch 3
3517 13.0f, 14.0f, 15.0f,
3518
3519 // Batch 4
3520 7.0f, 8.0f, 9.0f,
3521
3522 // Batch 5
3523 16.0f, 17.0f, 18.0f,
3524 }));
3525
3526 return result;
3527}
3528
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003529LayerTestResult<float, 2> Concatenation2dDim0Test(
3530 armnn::IWorkloadFactory& workloadFactory,
3531 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003532{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003533 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003534}
3535
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003536template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003537LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
3538 armnn::IWorkloadFactory& workloadFactory,
3539 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3540 float qScale,
3541 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003542{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003543 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003544
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003545 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
3546 workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
3547
telsoa014fcda012018-03-09 14:13:49 +00003548 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3549 // Batch 0
3550 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
3551
3552 // Batch 1
3553 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
3554 }));
3555
3556 return result;
3557}
3558
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003559LayerTestResult<float, 2> Concatenation2dDim1Test(
3560 armnn::IWorkloadFactory& workloadFactory,
3561 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003562{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003563 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003564}
3565
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003566template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003567LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
3568 armnn::IWorkloadFactory& workloadFactory,
3569 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3570 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003571 int32_t qOffset)
3572{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003573 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003574 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3575 // Batch 0
3576 1.0f, 2.0f, 3.0f,
3577
3578 // Batch 1
3579 10.0f, 11.0f, 12.0f,
3580 }));
3581
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003582 armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003583 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3584 // Batch 0
3585 4.0f, 5.0f, 6.0f,
3586
3587 // Batch 1
3588 13.0f, 14.0f, 15.0f,
3589
3590 // Batch 0
3591 7.0f, 8.0f, 9.0f,
3592 }));
3593
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003594 armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003595 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3596 // Batch 1
3597 16.0f, 17.0f, 18.0f,
3598 }));
3599
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003600 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003601 LayerTestResult<T, 2> result(outputTensorInfo);
3602
3603 std::vector<T> output;
3604 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003605 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003606 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3607 { input0.data(), input1.data(), input2.data() },
3608 outputTensorInfo,
3609 output.data(),
3610 0,
3611 true);
telsoa014fcda012018-03-09 14:13:49 +00003612
3613 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
3614 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3615 // Batch 0
3616 1.0f, 2.0f, 3.0f,
3617
3618 // Batch 1
3619 10.0f, 11.0f, 12.0f,
3620
3621 // Batch 2
3622 4.0f, 5.0f, 6.0f,
3623
3624 // Batch 3
3625 13.0f, 14.0f, 15.0f,
3626
3627 // Batch 4
3628 7.0f, 8.0f, 9.0f,
3629
3630 // Batch 5
3631 16.0f, 17.0f, 18.0f,
3632 }));
3633
3634 return result;
3635}
3636
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003637LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
3638 armnn::IWorkloadFactory& workloadFactory,
3639 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003640{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003641 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
3642 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003643}
3644
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003645template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003646LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
3647 armnn::IWorkloadFactory& workloadFactory,
3648 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3649 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003650 int32_t qOffset)
3651{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003652 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003653 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3654 // Batch 0
3655 1.0f, 2.0f, 3.0f,
3656
3657 // Batch 1
3658 10.0f, 11.0f, 12.0f,
3659 }));
3660
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003661 armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003662 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3663 // Batch 0
3664 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
3665
3666 // Batch 1
3667 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
3668 }));
3669
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003670 armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003671 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3672 // Batch 0
3673 9.0f,
3674
3675 // Batch 1
3676 18.0f
3677 }));
3678
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003679 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003680 LayerTestResult<T, 2> result(outputTensorInfo);
3681
3682 std::vector<T> output;
3683 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003684 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003685 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3686 { input0.data(), input1.data(), input2.data() },
3687 outputTensorInfo,
3688 output.data(),
3689 1,
3690 true);
telsoa014fcda012018-03-09 14:13:49 +00003691
3692 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
3693 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3694 // Batch 0
3695 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
3696
3697 // Batch 1
3698 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
3699 }));
3700
3701 return result;
3702}
3703
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003704LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
3705 armnn::IWorkloadFactory& workloadFactory,
3706 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003707{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003708 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
3709 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003710}
3711
// Shared driver for the same-shape 3D concatenation tests.
//
// Builds three fixed [2,3,2] inputs (quantized from the float fixtures with
// qScale/qOffset), concatenates them along 'dimension' via the Concatenate
// helper, and returns the actual output in 'result.output'. Callers supply
// 'outputTensorInfo' and fill in 'result.outputExpected' themselves.
// 'useSubtensor' is forwarded to Concatenate — presumably it selects the
// sub-tensor workload path; confirm against the helper's definition.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    // All three inputs share the same [batch, channel, width] = [2, 3, 2] shape.
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Run the concat workload over all three inputs along the requested axis.
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
3799
// 3D concatenation along dimension 0 (batch): three [2,3,2] inputs
// produce a [6,3,2] output — whole inputs stacked one after another.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    // Expected output: input0's two batches, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
3870
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003871LayerTestResult<float, 3> Concatenation3dDim0Test(
3872 armnn::IWorkloadFactory& workloadFactory,
3873 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003874{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003875 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003876}
3877
// 3D concatenation along dimension 1 (channel): three [2,3,2] inputs
// produce a [2,9,2] output — channels appended per batch.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    // Expected output: for each batch, input0's channels, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
3948
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003949LayerTestResult<float, 3> Concatenation3dDim1Test(
3950 armnn::IWorkloadFactory& workloadFactory,
3951 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003952{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003953 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003954}
3955
// 3D concatenation along dimension 2 (innermost/width): three [2,3,2] inputs
// produce a [2,3,6] output — values interleaved per (batch, channel) row.
// 'useSubtensor' is exposed because the innermost axis is the case backends
// may not support via sub-tensors — TODO confirm against backend capabilities.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    // Expected output: each row is input0's pair, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
3991
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003992LayerTestResult<float, 3> Concatenation3dDim2Test(
3993 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00003994 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3995 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003996{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003997 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
3998 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003999}
4000
// 3D concatenation along dimension 0 (batch) with inputs of DIFFERENT batch
// counts: [2,3,2] + [1,3,2] + [3,3,2] -> [6,3,2]. All non-concat dimensions
// must still match.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        25.0f, 26.0f,

        // Batch 0, Channel 1
        27.0f, 28.0f,

        // Batch 0, Channel 2
        29.0f, 30.0f,

        // Batch 1, Channel 0
        13.0f, 14.0f,

        // Batch 1, Channel 1
        15.0f, 16.0f,

        // Batch 1, Channel 2
        17.0f, 18.0f,

        // Batch 2, Channel 0
        31.0f, 32.0f,

        // Batch 2, Channel 1
        33.0f, 34.0f,

        // Batch 2, Channel 2
        35.0f, 36.0f
    }));

    // Output batch count is the sum of the inputs': 2 + 1 + 3 = 6.
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected output: input0's batches, then input1's, then input2's, in order.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
4143
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004144LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
4145 armnn::IWorkloadFactory& workloadFactory,
4146 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004147{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004148 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4149 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004150}
4151
// 3D concatenation along dimension 1 (channel) with inputs of DIFFERENT
// channel counts: [2,3,2] + [2,4,2] + [2,1,2] -> [2,8,2].
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    // Output channel count is the sum of the inputs': 3 + 4 + 1 = 8.
    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected output: per batch, input0's channels, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
4282
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004283LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
4284 armnn::IWorkloadFactory& workloadFactory,
4285 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004286{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004287 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4288 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004289}
4290
// 3D concatenation along dimension 2 (innermost/width) with inputs of
// DIFFERENT widths: [2,3,2] + [2,3,1] + [2,3,3] -> [2,3,6].
// 'useSubtensor' toggles the sub-tensor path in the Concatenate helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    // Output width is the sum of the inputs': 2 + 1 + 3 = 6.
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected output: each row is input0's 2 values, input1's 1, input2's 3.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
4398
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004399LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
4400 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004401 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4402 bool useSubtensor)
4403{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004404 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
4405 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004406}
4407
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004408template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004409LayerTestResult<T, 4> Concatenation4dTestImpl(
4410 armnn::IWorkloadFactory& workloadFactory,
4411 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4412 const armnn::TensorInfo& outputTensorInfo,
4413 unsigned int dimension,
4414 bool useSubtensor,
4415 float qScale,
4416 int32_t qOffset)
4417{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004418 armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004419
4420 auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4421 1.0f, 2.0f,
4422 3.0f, 4.0f,
4423 5.0f, 6.0f,
4424 7.0f, 8.0f,
4425 9.0f, 10.0f,
4426 11.0f, 12.0f
4427 }));
4428
4429 auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4430 11.0f, 12.0f,
4431 13.0f, 14.0f,
4432 15.0f, 16.0f,
4433 17.0f, 18.0f,
4434 19.0f, 20.0f,
4435 21.0f, 22.0f
4436 }));
4437
4438 auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4439 21.0f, 22.0f,
4440 23.0f, 24.0f,
4441 25.0f, 26.0f,
4442 27.0f, 28.0f,
4443 29.0f, 30.0f,
4444 31.0f, 32.0f
4445 }));
4446
4447 LayerTestResult<T, 4> result(outputTensorInfo);
4448
4449 std::vector<T> output;
4450 output.resize(outputTensorInfo.GetNumElements());
4451
4452 Concatenate<T>(workloadFactory,
4453 memoryManager,
4454 {inputTensorInfo, inputTensorInfo, inputTensorInfo},
4455 {input0.data(), input1.data(), input2.data()},
4456 outputTensorInfo,
4457 output.data(),
4458 dimension,
4459 useSubtensor);
4460
4461 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4462 return result;
4463}
4464
// 4D concatenation along dimension 0: three [1,3,2,2] inputs -> [3,3,2,2];
// expected output is simply the inputs laid out back-to-back.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
4501
4502LayerTestResult<float, 4> Concatenation4dDim0Test(
4503 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004504 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004505{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004506 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004507}
4508
// 4D concatenation along dimension 1: three [1,3,2,2] inputs -> [1,9,2,2];
// expected output is the inputs' data appended in order along that axis.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4546
4547LayerTestResult<float, 4> Concatenation4dDim1Test(
4548 armnn::IWorkloadFactory& workloadFactory,
4549 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4550{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004551 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004552}
4553
// 4D concatenation along dimension 2: three [1,3,2,2] inputs -> [1,3,6,2];
// each dim-1 slice holds the corresponding slice of every input in turn.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4591
4592LayerTestResult<float, 4> Concatenation4dDim2Test(
4593 armnn::IWorkloadFactory& workloadFactory,
4594 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4595{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004596 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004597}
4598
// 4D concatenation along dimension 3 (innermost): three [1,3,2,2] inputs
// -> [1,3,2,6]; values from the three inputs interleave per innermost row.
// 'useSubtensor' toggles the sub-tensor path — the innermost axis is the
// case backends may handle differently; confirm against the Concatenate helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
4637
4638LayerTestResult<float, 4> Concatenation4dDim3Test(
4639 armnn::IWorkloadFactory& workloadFactory,
4640 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4641 bool useSubtensor)
4642{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004643 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
4644 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00004645}
4646
// Tests 4D concatenation of two differently shaped tensors along dimension 0
// (batch): { 1, 3, 2, 2 } + { 2, 3, 2, 2 } -> { 3, 3, 2, 2 }.
//
// Parameters:
//   workloadFactory/memoryManager - backend under test.
//   qScale/qOffset               - quantization parameters applied to the data.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Run the concat workload. The trailing 'true' matches the useSubtensor
    // argument of the dim-3 variant - presumably enabling the sub-tensor path
    // where the backend supports it; confirm against Concatenate's declaration.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Concatenating along the batch dimension simply appends input1's two
    // batches after input0's single batch.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4726
4727LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
4728 armnn::IWorkloadFactory& workloadFactory,
4729 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4730{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004731 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
4732 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004733}
4734
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004735template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004736LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
4737 armnn::IWorkloadFactory& workloadFactory,
4738 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4739 float qScale,
4740 int32_t qOffset)
4741{
4742 unsigned int dimension = 1;
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004743 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004744
4745 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4746 1.0f, 2.0f,
4747 3.0f, 4.0f,
4748 5.0f, 6.0f,
4749 7.0f, 8.0f,
4750 9.0f, 10.0f,
4751 11.0f, 12.0f
4752 }));
4753
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004754 armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004755
4756 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4757 11.0f, 12.0f,
4758 13.0f, 14.0f,
4759 15.0f, 16.0f,
4760 17.0f, 18.0f,
4761
4762 }));
4763
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004764 armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004765
4766 LayerTestResult<T, 4> result(outputTensorInfo);
4767
4768 std::vector<T> output;
4769 output.resize(outputTensorInfo.GetNumElements());
4770 Concatenate<T>(workloadFactory,
4771 memoryManager,
4772 {inputTensorInfo0, inputTensorInfo1},
4773 {input0.data(), input1.data()},
4774 outputTensorInfo,
4775 output.data(),
4776 dimension,
4777 true);
4778
4779 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4780 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4781 1.0f, 2.0f,
4782 3.0f, 4.0f,
4783 5.0f, 6.0f,
4784 7.0f, 8.0f,
4785 9.0f, 10.0f,
4786 11.0f, 12.0f,
4787 11.0f, 12.0f,
4788 13.0f, 14.0f,
4789 15.0f, 16.0f,
4790 17.0f, 18.0f
4791 }));
4792
4793 return result;
4794}
4795
4796LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
4797 armnn::IWorkloadFactory& workloadFactory,
4798 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4799{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004800 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
4801 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004802}
4803
// Tests 4D concatenation of two differently shaped tensors along dimension 2:
// { 1, 3, 2, 2 } + { 1, 3, 3, 2 } -> { 1, 3, 5, 2 }.
//
// Parameters:
//   workloadFactory/memoryManager - backend under test.
//   qScale/qOffset               - quantization parameters applied to the data.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Run the concat workload. The trailing 'true' matches the useSubtensor
    // argument of the dim-3 variant - presumably enabling the sub-tensor path
    // where the backend supports it; confirm against Concatenate's declaration.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: within each dim-1 slice, input0's two dim-2 rows are
    // followed by input1's three dim-2 rows.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
4875
4876LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
4877 armnn::IWorkloadFactory& workloadFactory,
4878 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4879{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004880 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
4881 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004882}
4883
// Tests 4D concatenation of two differently shaped tensors along dimension 3
// (the innermost dimension): { 1, 3, 2, 2 } + { 1, 3, 2, 3 } -> { 1, 3, 2, 5 }.
//
// Parameters:
//   workloadFactory/memoryManager - backend under test.
//   qScale/qOffset               - quantization parameters applied to the data.
//   useSubtensor                 - forwarded to Concatenate; presumably selects
//                                  the sub-tensor path - confirm against
//                                  Concatenate's declaration.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    unsigned int dimension = 3;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f, 13.0f,
        14.0f, 15.0f, 16.0f,

        17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f,

        23.0f, 24.0f, 25.0f,
        26.0f, 27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: each innermost row contains input0's two elements
    // followed by input1's three elements.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
    }));

    return result;
}
4944
4945LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
4946 armnn::IWorkloadFactory& workloadFactory,
4947 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4948 bool useSubtensor)
4949{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004950 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
4951 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004952}
4953
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004954LayerTestResult<float, 4> ResizeBilinearNopTest(
4955 armnn::IWorkloadFactory& workloadFactory,
4956 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004957 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004958{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004959 const armnn::TensorInfo inputTensorInfo =
4960 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
4961
4962 const armnn::TensorInfo outputTensorInfo =
4963 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00004964
James Conroy6b965822018-11-01 11:33:09 +00004965 std::vector<float> inputData({
4966 1.0f, 2.0f, 3.0f, 4.0f,
4967 2.0f, 3.0f, 4.0f, 5.0f,
4968 3.0f, 4.0f, 5.0f, 6.0f,
4969 4.0f, 5.0f, 6.0f, 7.0f,
4970
telsoa014fcda012018-03-09 14:13:49 +00004971 1.0f, 2.0f, 3.0f, 4.0f,
4972 2.0f, 3.0f, 4.0f, 5.0f,
4973 3.0f, 4.0f, 5.0f, 6.0f,
4974 4.0f, 5.0f, 6.0f, 7.0f
James Conroy6b965822018-11-01 11:33:09 +00004975 });
4976
4977 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004978 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004979 {
4980 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00004981 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00004982 inputData = tmp;
4983 }
4984
4985 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004986
4987 LayerTestResult<float, 4> result(outputTensorInfo);
4988 result.outputExpected = input;
4989
4990 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4991 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4992
4993 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004994 descriptor.m_Parameters.m_DataLayout = dataLayout;
4995 armnn::WorkloadInfo info;
4996 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4997 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4998
4999 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5000
5001 inputHandle->Allocate();
5002 outputHandle->Allocate();
5003 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5004
James Conroy074f3712018-10-03 09:32:03 +01005005 workload->Execute();
5006
5007 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5008 return result;
5009}
5010
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005011LayerTestResult<float, 4> SimpleResizeBilinearTest(
5012 armnn::IWorkloadFactory& workloadFactory,
5013 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005014 const armnn::DataLayout dataLayout)
James Conroy074f3712018-10-03 09:32:03 +01005015{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005016 const armnn::TensorInfo inputTensorInfo =
5017 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
5018
5019 const armnn::TensorInfo outputTensorInfo =
5020 armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);
James Conroy074f3712018-10-03 09:32:03 +01005021
James Conroy6b965822018-11-01 11:33:09 +00005022 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005023 1.0f, 255.0f,
James Conroy6b965822018-11-01 11:33:09 +00005024 200.0f, 250.0f,
5025
5026 250.0f, 200.0f,
5027 250.0f, 1.0f
5028 });
James Conroy074f3712018-10-03 09:32:03 +01005029
5030 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
5031 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
James Conroy6b965822018-11-01 11:33:09 +00005032 // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
5033 // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
5034 // which we would expect if projecting the centre).
5035
5036 std::vector<float> outputData({
5037 1.0f,
5038
5039 250.0f
5040 });
5041
5042 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005043 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005044 {
5045 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005046 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005047 inputData = tmp;
5048
5049 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005050 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005051 outputData = tmp1;
5052 }
5053
5054 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
5055
James Conroy074f3712018-10-03 09:32:03 +01005056 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005057 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
James Conroy074f3712018-10-03 09:32:03 +01005058
5059 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5060 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5061
5062 armnn::ResizeBilinearQueueDescriptor descriptor;
5063 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005064 armnn::WorkloadInfo info;
5065 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5066 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5067
5068 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5069
5070 inputHandle->Allocate();
5071 outputHandle->Allocate();
5072 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5073
5074 workload->Execute();
5075
5076 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5077 return result;
5078}
5079
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005080LayerTestResult<float, 4> ResizeBilinearSqMinTest(
5081 armnn::IWorkloadFactory& workloadFactory,
5082 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005083 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005084{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005085 const armnn::TensorInfo inputTensorInfo =
5086 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
5087
5088 const armnn::TensorInfo outputTensorInfo =
5089 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005090
James Conroy6b965822018-11-01 11:33:09 +00005091 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005092 1.0f, 2.0f, 3.0f, 4.0f,
5093 2.0f, 3.0f, 4.0f, 5.0f,
5094 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00005095 4.0f, 5.0f, 6.0f, 7.0f,
5096
5097 7.0f, 6.0f, 5.0f, 4.0f,
5098 6.0f, 5.0f, 4.0f, 3.0f,
5099 5.0f, 4.0f, 3.0f, 2.0f,
5100 4.0f, 3.0f, 2.0f, 1.0f
5101 });
5102
5103 std::vector<float> outputData({
5104 1.0f, 3.0f,
5105 3.0f, 5.0f,
5106
5107 7.0f, 5.0f,
5108 5.0f, 3.0f
5109 });
5110
5111 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005112 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005113 {
5114 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005115 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005116 inputData = tmp;
5117
5118 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005119 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005120 outputData = tmp1;
5121 }
5122
5123 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005124
telsoa014fcda012018-03-09 14:13:49 +00005125 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005126 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005127
5128 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5129 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5130
5131 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005132 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005133 armnn::WorkloadInfo info;
5134 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5135 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5136
5137 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5138
5139 inputHandle->Allocate();
5140 outputHandle->Allocate();
5141 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5142
5143 workload->Execute();
5144
5145 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5146 return result;
5147}
5148
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005149LayerTestResult<float, 4> ResizeBilinearMinTest(
5150 armnn::IWorkloadFactory& workloadFactory,
5151 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005152 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005153{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005154 const armnn::TensorInfo inputTensorInfo =
5155 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
5156
5157 const armnn::TensorInfo outputTensorInfo =
5158 armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005159
James Conroy6b965822018-11-01 11:33:09 +00005160 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005161 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
5162 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00005163 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
5164
5165 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
5166 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
5167 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
5168 });
5169
5170 std::vector<float> outputData({
5171 1.0f, 2.6666f, 6.00f,
5172 78.5f, 179.3333f, 401.00f,
5173
5174 987.0f, 454.6670f, 203.33f,
5175 48.5f, 22.3333f, 10.00f
5176 });
5177
5178 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005179 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005180 {
5181 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005182 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005183 inputData = tmp;
5184
5185 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005186 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005187 outputData = tmp1;
5188 }
5189
5190 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005191
5192 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005193 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005194
5195 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5196 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5197
5198 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005199 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005200 armnn::WorkloadInfo info;
5201 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5202 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5203
5204 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5205
5206 inputHandle->Allocate();
5207 outputHandle->Allocate();
5208 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5209
5210 workload->Execute();
5211
5212 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5213 return result;
5214}
5215
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005216LayerTestResult<float, 4> ResizeBilinearMagTest(
5217 armnn::IWorkloadFactory& workloadFactory,
5218 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005219 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005220{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005221 const armnn::TensorInfo inputTensorInfo =
5222 armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);
5223
5224 const armnn::TensorInfo outputTensorInfo =
5225 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005226
James Conroy6b965822018-11-01 11:33:09 +00005227 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005228 1.0f, 2.0f,
5229 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00005230 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00005231
James Conroy6b965822018-11-01 11:33:09 +00005232 233.0f, 144.0f,
5233 21.0f, 13.0f,
5234 2.0f, 1.0f
5235 });
5236
5237 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01005238 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
5239 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00005240 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
5241
5242 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
5243 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
5244 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
5245 });
5246
5247 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005248 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005249 {
5250 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005251 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005252 inputData = tmp;
5253
5254 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005255 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005256 outputData = tmp1;
5257 }
5258
5259 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
5260
5261 LayerTestResult<float, 4> result(outputTensorInfo);
5262 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005263
5264 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5265 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5266
5267 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005268 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005269 armnn::WorkloadInfo info;
5270 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5271 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5272
5273 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5274
5275 inputHandle->Allocate();
5276 outputHandle->Allocate();
5277 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5278
5279 workload->Execute();
5280
5281 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5282 return result;
5283}
5284
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005285LayerTestResult<float, 2> FakeQuantizationTest(
5286 armnn::IWorkloadFactory& workloadFactory,
5287 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005288{
5289 constexpr unsigned int width = 2;
5290 constexpr unsigned int height = 3;
5291
5292 const armnn::TensorInfo tensorInfo({height, width },
5293 armnn::DataType::Float32);
5294 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5295 -10.0f, -5.0f,
5296 0.0f, 5.0f,
5297 10.0f, 10.0f
5298 }));
5299
5300 LayerTestResult<float, 2> ret(tensorInfo);
5301
5302 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5303
5304 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5305
5306 armnn::FakeQuantizationQueueDescriptor data;
5307 armnn::WorkloadInfo info;
5308
5309 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
5310 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
5311 float min = -10.f;
5312 float max = 10.f;
5313
5314 data.m_Parameters.m_Min = min;
5315 data.m_Parameters.m_Max = max;
5316
5317 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
5318 armnn::FakeQuantizationQueueDescriptor refData = data;
5319 armnn::WorkloadInfo refInfo = info;
5320 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
5321
5322 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
5323
5324 inputHandle->Allocate();
5325 outputHandle->Allocate();
5326
5327 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
5328
5329 workload->Execute();
5330
5331 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
5332
5333 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5334 0.0f, 63.0f,
5335 128.0f, 191.0f,
5336 255.0f, 255.0f
5337 }));
5338 return ret;
5339}
5340
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005341namespace
5342{
5343
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005344LayerTestResult<float, 4> L2NormalizationTestImpl(
5345 armnn::IWorkloadFactory& workloadFactory,
5346 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5347 const armnn::TensorShape& inputOutputTensorShape,
5348 const std::vector<float>& inputValues,
5349 const std::vector<float>& expectedOutputValues,
Matthew Bentham8800c002018-11-19 13:19:28 +00005350 const armnn::DataLayout layout)
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005351{
5352 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5353 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5354
jimfly013aab7c32018-11-12 13:32:08 +00005355 // at this point if we require it permute the input data
5356 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
5357 std::vector<float> inputData = inputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005358 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005359 {
5360 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005361 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005362 inputData = tmp;
5363 }
5364
5365 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005366
5367 LayerTestResult<float, 4> result(outputTensorInfo);
jimfly013aab7c32018-11-12 13:32:08 +00005368 std::vector<float> expectedOutputData = expectedOutputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005369 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005370 {
5371 std::vector<float> tmp(expectedOutputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005372 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
5373 expectedOutputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005374 expectedOutputData = tmp;
5375 }
5376 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005377
5378 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5379 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5380
5381 armnn::L2NormalizationQueueDescriptor descriptor;
Matthew Bentham8800c002018-11-19 13:19:28 +00005382 descriptor.m_Parameters.m_DataLayout = layout;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005383 armnn::WorkloadInfo info;
5384
5385 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5386 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5387
5388 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
5389
5390 inputHandle->Allocate();
5391 outputHandle->Allocate();
5392
5393 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
5394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005395 ExecuteWorkload(*workload, memoryManager);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005396
5397 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5398
5399 return result;
5400}
5401
// Returns 1 / ||elements||_2, i.e. the reciprocal of the Euclidean norm of
// the given values. Used to build expected L2-normalization outputs.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    float sumOfSquares = 0.0f;
    for (float element : elements)
    {
        sumOfSquares += element * element;
    }
    return 1.0f / sqrtf(sumOfSquares);
}
5408
5409} // anonymous namespace
5410
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005411template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005412LayerTestResult<T, 2> Pad2dTestCommon(
5413 armnn::IWorkloadFactory& workloadFactory,
5414 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5415 float qScale,
5416 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005417{
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005418 const armnn::TensorShape inputShape{ 3, 3 };
5419 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005420
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005421 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5422 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005423
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005424 std::vector<T> inputValues(
5425 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005426 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005427 // Height (3) x Width (3)
5428 4, 8, 6,
5429 7, 4, 4,
5430 3, 2, 4
5431 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005432
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005433 std::vector<T> expectedOutputValues(
5434 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005435 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005436 0, 0, 0, 0, 0, 0, 0,
5437 0, 0, 0, 0, 0, 0, 0,
5438 0, 0, 4, 8, 6, 0, 0,
5439 0, 0, 7, 4, 4, 0, 0,
5440 0, 0, 3, 2, 4, 0, 0,
5441 0, 0, 0, 0, 0, 0, 0,
5442 0, 0, 0, 0, 0, 0, 0
5443 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005444
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005445 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005446
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005447 LayerTestResult<T, 2> result(outputTensorInfo);
5448 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005449
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005450 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5451 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005452
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005453 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005454
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005455 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5456 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5457 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005458
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005459 descriptor.m_Parameters.m_PadList = PadList;
5460 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005461
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005462 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5463 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005464
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005465 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005466
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005467 inputHandle->Allocate();
5468 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005469
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005470 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005471
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005472 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005473
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005474 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005475
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005476 return result;
5477}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005478
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005479template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005480LayerTestResult<T, 3> Pad3dTestCommon(
5481 armnn::IWorkloadFactory& workloadFactory,
5482 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5483 float qScale,
5484 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005485{
5486 const armnn::TensorShape inputShape{ 2, 2, 2 };
5487 const armnn::TensorShape outputShape{ 3, 5, 6 };
5488
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005489 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5490 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005491
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005492 std::vector<T> inputValues(
5493 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005494 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005495 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005496 0, 4,
5497 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005498
5499 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005500 6, 1,
5501 5, 2
5502 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005503
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005504 std::vector<T> expectedOutputValues(
5505 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005506 {
5507
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005508 0, 0, 0, 0, 0, 0,
5509 0, 0, 0, 0, 0, 0,
5510 0, 0, 0, 4, 0, 0,
5511 0, 0, 2, 5, 0, 0,
5512 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005513
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005514 0, 0, 0, 0, 0, 0,
5515 0, 0, 0, 0, 0, 0,
5516 0, 0, 6, 1, 0, 0,
5517 0, 0, 5, 2, 0, 0,
5518 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005519
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005520 0, 0, 0, 0, 0, 0,
5521 0, 0, 0, 0, 0, 0,
5522 0, 0, 0, 0, 0, 0,
5523 0, 0, 0, 0, 0, 0,
5524 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005525
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005526 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005527
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005528 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005529
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005530 LayerTestResult<T, 3> result(outputTensorInfo);
5531 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005532
5533 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5534 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5535
5536 armnn::PadQueueDescriptor descriptor;
5537
5538 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5539 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
5540 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5541 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5542
5543 descriptor.m_Parameters.m_PadList = PadList;
5544 armnn::WorkloadInfo info;
5545
5546 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5547 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5548
5549 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5550
5551 inputHandle->Allocate();
5552 outputHandle->Allocate();
5553
5554 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
5555
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005556 workload->Execute();
5557
5558 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
5559
5560 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005561}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005562
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005563template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005564LayerTestResult<T, 4> Pad4dTestCommon(
5565 armnn::IWorkloadFactory& workloadFactory,
5566 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5567 float qScale,
5568 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005569{
5570 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
5571 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
5572
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005573 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5574 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005575
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005576 std::vector<T> inputValues(
5577 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005578 {
5579 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005580 0, 1,
5581 2, 3,
5582 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005583
5584 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005585 6, 7,
5586 8, 9,
5587 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005588
5589 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005590 12, 13,
5591 14, 15,
5592 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005593
5594 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005595 18, 19,
5596 20, 21,
5597 22, 23
5598 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005599
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005600 std::vector<T> expectedOutputValues(
5601 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005602 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005603 0, 0, 0, 0,
5604 0, 0, 0, 0,
5605 0, 0, 0, 0,
5606 0, 0, 0, 0,
5607 0, 0, 0, 0,
5608 0, 0, 0, 0,
5609 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005610
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005611 0, 0, 0, 0,
5612 0, 0, 0, 0,
5613 0, 0, 0, 0,
5614 0, 0, 0, 0,
5615 0, 0, 0, 0,
5616 0, 0, 0, 0,
5617 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005618
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005619 0, 0, 0, 0,
5620 0, 0, 0, 0,
5621 0, 0, 0, 0,
5622 0, 0, 0, 0,
5623 0, 0, 0, 0,
5624 0, 0, 0, 0,
5625 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005626
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005627 0, 0, 0, 0,
5628 0, 0, 0, 0,
5629 0, 0, 0, 0,
5630 0, 0, 0, 0,
5631 0, 0, 0, 0,
5632 0, 0, 0, 0,
5633 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005634
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005635 0, 0, 0, 0,
5636 0, 0, 0, 0,
5637 0, 0, 0, 0,
5638 0, 0, 0, 0,
5639 0, 0, 0, 0,
5640 0, 0, 0, 0,
5641 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005642
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005643 0, 0, 0, 0,
5644 0, 0, 0, 0,
5645 0, 0, 0, 0,
5646 0, 0, 0, 0,
5647 0, 0, 0, 0,
5648 0, 0, 0, 0,
5649 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005650
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005651 0, 0, 0, 0,
5652 0, 0, 0, 0,
5653 0, 0, 0, 0,
5654 0, 0, 0, 0,
5655 0, 0, 0, 0,
5656 0, 0, 0, 0,
5657 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005658
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005659 0, 0, 0, 0,
5660 0, 0, 0, 0,
5661 0, 0, 0, 0,
5662 0, 0, 1, 0,
5663 0, 2, 3, 0,
5664 0, 4, 5, 0,
5665 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005666
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005667 0, 0, 0, 0,
5668 0, 0, 0, 0,
5669 0, 0, 0, 0,
5670 0, 6, 7, 0,
5671 0, 8, 9, 0,
5672 0, 10, 11, 0,
5673 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005674
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005675 0, 0, 0, 0,
5676 0, 0, 0, 0,
5677 0, 0, 0, 0,
5678 0, 0, 0, 0,
5679 0, 0, 0, 0,
5680 0, 0, 0, 0,
5681 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005682
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005683 0, 0, 0, 0,
5684 0, 0, 0, 0,
5685 0, 0, 0, 0,
5686 0, 0, 0, 0,
5687 0, 0, 0, 0,
5688 0, 0, 0, 0,
5689 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005690
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005691 0, 0, 0, 0,
5692 0, 0, 0, 0,
5693 0, 0, 0, 0,
5694 0, 0, 0, 0,
5695 0, 0, 0, 0,
5696 0, 0, 0, 0,
5697 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005698
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005699 0, 0, 0, 0,
5700 0, 0, 0, 0,
5701 0, 0, 0, 0,
5702 0, 12, 13, 0,
5703 0, 14, 15, 0,
5704 0, 16, 17, 0,
5705 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005706
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005707 0, 0, 0, 0,
5708 0, 0, 0, 0,
5709 0, 0, 0, 0,
5710 0, 18, 19, 0,
5711 0, 20, 21, 0,
5712 0, 22, 23, 0,
5713 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005714
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005715 0, 0, 0, 0,
5716 0, 0, 0, 0,
5717 0, 0, 0, 0,
5718 0, 0, 0, 0,
5719 0, 0, 0, 0,
5720 0, 0, 0, 0,
5721 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005722
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005723 0, 0, 0, 0,
5724 0, 0, 0, 0,
5725 0, 0, 0, 0,
5726 0, 0, 0, 0,
5727 0, 0, 0, 0,
5728 0, 0, 0, 0,
5729 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005730
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005731 0, 0, 0, 0,
5732 0, 0, 0, 0,
5733 0, 0, 0, 0,
5734 0, 0, 0, 0,
5735 0, 0, 0, 0,
5736 0, 0, 0, 0,
5737 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005738
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005739 0, 0, 0, 0,
5740 0, 0, 0, 0,
5741 0, 0, 0, 0,
5742 0, 0, 0, 0,
5743 0, 0, 0, 0,
5744 0, 0, 0, 0,
5745 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005746
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005747 0, 0, 0, 0,
5748 0, 0, 0, 0,
5749 0, 0, 0, 0,
5750 0, 0, 0, 0,
5751 0, 0, 0, 0,
5752 0, 0, 0, 0,
5753 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005754
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005755 0, 0, 0, 0,
5756 0, 0, 0, 0,
5757 0, 0, 0, 0,
5758 0, 0, 0, 0,
5759 0, 0, 0, 0,
5760 0, 0, 0, 0,
5761 0, 0, 0, 0
5762 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005763
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005764 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005765
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005766 LayerTestResult<T, 4> result(outputTensorInfo);
5767 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005768
5769 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5770 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5771
5772 armnn::PadQueueDescriptor descriptor;
5773
5774 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5775 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
5776 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5777 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
5778 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
5779
5780 descriptor.m_Parameters.m_PadList = PadList;
5781 armnn::WorkloadInfo info;
5782
5783 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5784 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5785
5786 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5787
5788 inputHandle->Allocate();
5789 outputHandle->Allocate();
5790
5791 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
5792
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005793 workload->Execute();
5794
5795 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5796
5797 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005798}
5799
// Pads a 3x3 QAsymm8 tensor to 7x7 (scale 1.0, offset 0, so values map 1:1).
LayerTestResult<uint8_t, 2> PadUint82dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
5806
// Pads a [2,2,2] QAsymm8 tensor to [3,5,6] (scale 1.0, offset 0, so values map 1:1).
LayerTestResult<uint8_t, 3> PadUint83dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
5813
// Pads a [2,2,3,2] QAsymm8 tensor to [4,5,7,4] (scale 1.0, offset 0, so values map 1:1).
LayerTestResult<uint8_t, 4> PadUint84dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
5820
// Pads a 3x3 Float32 tensor to 7x7 (quantization parameters unused for floats).
LayerTestResult<float, 2> PadFloat322dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
5827
// Pads a [2,2,2] Float32 tensor to [3,5,6] (quantization parameters unused for floats).
LayerTestResult<float, 3> PadFloat323dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
5834
// Pads a [2,2,3,2] Float32 tensor to [4,5,7,4] (quantization parameters unused for floats).
LayerTestResult<float, 4> PadFloat324dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005841
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005842LayerTestResult<float, 4> L2Normalization1dTest(
5843 armnn::IWorkloadFactory& workloadFactory,
5844 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005845 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005846{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005847 // Width: 1
5848 // Height: 1
5849 // Channels: 10
5850 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005851 unsigned int numberOfBatches = 1;
5852 unsigned int numberOfChannels = 10;
5853 unsigned int height = 1;
5854 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00005855
jimfly013aab7c32018-11-12 13:32:08 +00005856
Nina Drozdd41b2592018-11-19 13:03:36 +00005857 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005858 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005859 std::vector<float> inputValues
5860 {
5861 // Batch 0, Channel 0, Height (1) x Width (1)
5862 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00005863
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005864 // Batch 0, Channel 1, Height (1) x Width (1)
5865 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00005866
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005867 // Batch 0, Channel 2, Height (1) x Width (1)
5868 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00005869
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005870 // Batch 0, Channel 3, Height (1) x Width (1)
5871 4.0f,
5872
5873 // Batch 0, Channel 4, Height (1) x Width (1)
5874 5.0f,
5875
5876 // Batch 0, Channel 5, Height (1) x Width (1)
5877 6.0f,
5878
5879 // Batch 0, Channel 6, Height (1) x Width (1)
5880 7.0f,
5881
5882 // Batch 0, Channel 7, Height (1) x Width (1)
5883 8.0f,
5884
5885 // Batch 0, Channel 8, Height (1) x Width (1)
5886 9.0f,
5887
5888 // Batch 0, Channel 9, Height (1) x Width (1)
5889 10.0f
5890 };
telsoa014fcda012018-03-09 14:13:49 +00005891 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005892 std::vector<float> expectedOutputValues
5893 {
5894 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00005895 1.0f * approxInvL2Norm,
5896 2.0f * approxInvL2Norm,
5897 3.0f * approxInvL2Norm,
5898 4.0f * approxInvL2Norm,
5899 5.0f * approxInvL2Norm,
5900 6.0f * approxInvL2Norm,
5901 7.0f * approxInvL2Norm,
5902 8.0f * approxInvL2Norm,
5903 9.0f * approxInvL2Norm,
5904 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005905 };
telsoa014fcda012018-03-09 14:13:49 +00005906
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005907
5908 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005909 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00005910}
5911
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005912LayerTestResult<float, 4> L2Normalization2dTest(
5913 armnn::IWorkloadFactory& workloadFactory,
5914 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005915 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005916{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005917 // Width: 5
5918 // Height: 1
5919 // Channels: 2
5920 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005921 unsigned int numberOfBatches = 1;
5922 unsigned int numberOfChannels = 2;
5923 unsigned int height = 1;
5924 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00005925
Nina Drozdd41b2592018-11-19 13:03:36 +00005926 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005927 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005928 std::vector<float> inputValues
5929 {
5930 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00005931 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00005932
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005933 // Batch 0, Channel 1, Height (1) x Width (5)
5934 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
5935 };
5936 std::vector<float> expectedOutputValues
5937 {
5938 // Batch 0, Channel 0, Height (1) x Width (5)
5939 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
5940 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
5941 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
5942 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00005943 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
5944
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005945 // Batch 0, Channel 1, Height (1) x Width (5)
5946 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
5947 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
5948 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
5949 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00005950 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005951 };
telsoa014fcda012018-03-09 14:13:49 +00005952
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005953 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005954 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005955}
telsoa014fcda012018-03-09 14:13:49 +00005956
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005957LayerTestResult<float, 4> L2Normalization3dTest(
5958 armnn::IWorkloadFactory& workloadFactory,
5959 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005960 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005961{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005962 // Width: 3
5963 // Height: 4
5964 // Channels: 2
5965 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005966 unsigned int numberOfBatches = 1;
5967 unsigned int numberOfChannels = 2;
5968 unsigned int height = 4;
5969 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00005970
Nina Drozdd41b2592018-11-19 13:03:36 +00005971 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005972 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005973 std::vector<float> inputValues
5974 {
5975 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005976 119.0f, 21.0f, 150.0f,
5977 149.0f, 32.0f, 179.0f,
5978 15.0f, 227.0f, 141.0f,
5979 147.0f, 199.0f, 220.0f,
5980
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005981 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005982 110.0f, 140.0f, 73.0f,
5983 211.0f, 212.0f, 89.0f,
5984 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005985 162.0f, 12.0f, 161.0f
5986 };
5987 std::vector<float> expectedOutputValues
5988 {
5989 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005990 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
5991 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
5992 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
5993 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
5994 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
5995 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
5996 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
5997 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
5998 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
5999 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6000 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
6001 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
6002
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006003 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006004 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
6005 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
6006 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
6007 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
6008 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
6009 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
6010 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
6011 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6012 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6013 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6014 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006015 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
6016 };
telsoa014fcda012018-03-09 14:13:49 +00006017
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006018 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006019 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006020}
telsoa014fcda012018-03-09 14:13:49 +00006021
// Tests L2 normalization on a 4D tensor (2 batches, 3 channels, 4x3 spatial).
// Each expected value is the input value scaled by the inverse L2 norm taken
// across the channel dimension at the same (batch, y, x) position.
LayerTestResult<float, 4> L2Normalization4dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2
    unsigned int numberOfBatches = 2;
    unsigned int numberOfChannels = 3;
    unsigned int height = 4;
    unsigned int width = 3;

    // GetTensorShape arranges the dimensions for the requested data layout
    // (NCHW or NHWC); the value lists below are always written in NCHW order.
    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
            numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f
    };
    // Each CalcInvL2Norm argument lists the three channel values sharing the
    // same spatial position, so the same triple repeats once per channel.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
         74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
         77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
         82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
        170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
         89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
        254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
         12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
         90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
         49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
          7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
         18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
         25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
         59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
         17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
         75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
         16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
                                   inputValues, expectedOutputValues, layout);
}
6166
// Runs a Constant workload whose layer output is a fixed 2x3x4x3 tensor and
// checks that the workload reproduces that tensor unchanged on its output.
// ArmnnType selects the tensor data type; qScale/qOffset are applied only for
// quantized types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    // A Constant layer copies its stored tensor verbatim, so the output
    // dimensions mirror the input dimensions exactly.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f,
    })));

    // The expected output is the constant tensor itself.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The constant data is owned by the descriptor via a CPU tensor handle,
    // mirroring how a real Constant layer stores its output.
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    // Constant workloads have no inputs; only the output is registered.
    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6259
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006260LayerTestResult<float, 4> ConstantTest(
6261 armnn::IWorkloadFactory& workloadFactory,
6262 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006263{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006264 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006265}
6266
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006267LayerTestResult<uint8_t, 4> ConstantTestUint8(
6268 armnn::IWorkloadFactory& workloadFactory,
6269 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006270{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006271 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006272}
6273
// Tests Merger (concatenation along channels) of two QAsymm8 tensors whose
// quantization parameters differ from each other. The output shares input1's
// parameters, so input1's values pass through unchanged while input2's values
// must be requantized into the output's scale/offset.
LayerTestResult<uint8_t, 3> MergerUint8DifferentQParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Quantized input1 tensor. Range [-3, 1]
    const float inputScale1 = 0.015686f;
    const int32_t inputOffset1 = 192;

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Quantized input2 tensor. Range [-1, 4]
    const float inputScale2 = 0.019608f;
    const int32_t inputOffset2 = 50;

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // Output has the same quantization parameters as input1,
    // so that only the requantization of input2 is required
    const float outputScale = 0.015686f;
    const int32_t outputOffset = 192;

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // First two channels are input1 verbatim; the third channel holds
    // input2's values requantized from (scale2, offset2) to (scale, offset).
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        176, 177, 178,
        179, 181, 182,
        183, 184, 186,
        187, 188, 189,
        191, 192, 193,
        195, 196, 197,
    })
    );

    outputTensorInfo.SetQuantizationScale(outputScale);
    outputTensorInfo.SetQuantizationOffset(outputOffset);
    inputTensorInfo1.SetQuantizationScale(inputScale1);
    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
    inputTensorInfo2.SetQuantizationScale(inputScale2);
    inputTensorInfo2.SetQuantizationOffset(inputOffset2);

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    // Input2 starts at channel 2 of the output (after input1's two channels).
    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Where the backend supports sub-tensors the inputs are created as views
    // directly into the output tensor at their window origins.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
            subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
            subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6415
// Tests Merger (concatenation along channels) of two QAsymm8 tensors that
// share identical quantization parameters, so values are copied straight
// through without requantization.
LayerTestResult<uint8_t, 3> MergerUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected output is simply input1's two channels followed by input2's
    // single channel.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    // Input2 is placed at channel 2 of the output, after input1's channels.
    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Where the backend supports sub-tensors the inputs are created as views
    // directly into the output tensor at their window origins.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6550
telsoa014fcda012018-03-09 14:13:49 +00006551
surmeh01bceff2f2018-03-29 16:29:27 +01006552namespace
telsoa014fcda012018-03-09 14:13:49 +00006553{
Sadik Armagan2999a022019-04-09 14:20:12 +01006554template <typename T>
6555LayerTestResult<T, 4> AdditionQuantizeTestHelper(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006556 armnn::IWorkloadFactory& workloadFactory,
6557 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6558 const unsigned int shape0[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01006559 const std::vector<T>& values0,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006560 float scale0,
6561 int32_t offset0,
6562 const unsigned int shape1[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01006563 const std::vector<T> & values1,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006564 float scale1,
6565 int32_t offset1,
6566 const unsigned int outShape[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01006567 const std::vector<T> & outValues,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006568 float outScale,
6569 int32_t outOffset)
surmeh01bceff2f2018-03-29 16:29:27 +01006570{
Sadik Armagan2999a022019-04-09 14:20:12 +01006571 auto dataType = (std::is_same<T, uint8_t>::value ?
6572 armnn::DataType::QuantisedAsymm8 :
6573 armnn::DataType::QuantisedSymm16);
6574
6575 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
6576 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
6577 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
telsoa014fcda012018-03-09 14:13:49 +00006578
surmeh01bceff2f2018-03-29 16:29:27 +01006579 inputTensorInfo0.SetQuantizationScale(scale0);
6580 inputTensorInfo0.SetQuantizationOffset(offset0);
telsoa014fcda012018-03-09 14:13:49 +00006581
surmeh01bceff2f2018-03-29 16:29:27 +01006582 inputTensorInfo1.SetQuantizationScale(scale1);
6583 inputTensorInfo1.SetQuantizationOffset(offset1);
telsoa014fcda012018-03-09 14:13:49 +00006584
surmeh01bceff2f2018-03-29 16:29:27 +01006585 outputTensorInfo.SetQuantizationScale(outScale);
6586 outputTensorInfo.SetQuantizationOffset(outOffset);
telsoa014fcda012018-03-09 14:13:49 +00006587
Sadik Armagan2999a022019-04-09 14:20:12 +01006588 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
6589 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00006590
Sadik Armagan2999a022019-04-09 14:20:12 +01006591 LayerTestResult<T, 4> result(outputTensorInfo);
6592 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
6593
6594 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
6595 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
6596 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6597
6598 armnn::AdditionQueueDescriptor data;
6599 armnn::WorkloadInfo info;
6600 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
6601 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
6602 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
6603
6604 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
6605
6606 inputHandle0->Allocate();
6607 inputHandle1->Allocate();
6608 outputHandle->Allocate();
6609
6610 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
6611 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
6612
6613 workload->Execute();
6614
6615 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6616
6617 return result;
6618}
6619} // anonymous namespace
6620
// Tests QAsymm8 element-wise addition, including saturation at the top of the
// uint8 range. All three tensors use scale 7.0 and offset 3; the comments to
// the right of each row give the dequantized values, (q - 3) * 7.
LayerTestResult<uint8_t, 4> AdditionUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<uint8_t> input0(
    {
        63,  35,  77,  70,  56, 112, //  420, 224,  518,  469,  371, 763
        203,  28, 252, 168, 245,  91 // 1400, 175, 1743, 1155, 1694, 616
    });

    std::vector<uint8_t> input1(
    {
        21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161,  63,  21, 105, 126 // 861, 1106,  420,  126,  714,  861
    });

    // Sums that would quantize above 255 saturate to 255 ("clamped" below).
    std::vector<uint8_t> output(
    {
        81,  39, 249, 255, 228, 255, //  546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 3,
                                      shape1, input1, 7.0f, 3,
                                      shape0, output, 7.0f, 3);
}
6652
// Tests QSymm16 element-wise addition. All three tensors use scale 7.0 and
// offset 0; the comments to the right of each row give the dequantized
// values, q * 7. Unlike the uint8 test, none of the sums saturate, as they
// are all well inside the int16 range.
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0(
    {
        63,  35,  77,  70,  56, 112, //  441, 245,  539,  490,  392, 784
        203,  28, 252, 168, 245,  91 // 1421, 196, 1764, 1176, 1715, 637
    });

    std::vector<int16_t> input1(
    {
        21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161,  63,  21, 105, 126 // 861, 1106,  420,  126,  714,  861
    });

    std::vector<int16_t> output(
    {
        84,  42, 252, 301, 231, 322, //  588,  294, 1764, 2107, 1617, 2254
        329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
    });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
6684
6685namespace
6686{
6687template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6688LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
6689 armnn::IWorkloadFactory& workloadFactory,
6690 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6691 const unsigned int shape0[4],
6692 const std::vector<T> & values0,
6693 float scale0,
6694 int32_t offset0,
6695 const unsigned int shape1[4],
6696 const std::vector<T> & values1,
6697 float scale1,
6698 int32_t offset1,
6699 const unsigned int outShape[4],
6700 const std::vector<T> & outValues,
6701 float outScale,
6702 int32_t outOffset)
6703{
6704 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
6705 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
6706 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
6707
6708 inputTensorInfo0.SetQuantizationScale(scale0);
6709 inputTensorInfo0.SetQuantizationOffset(offset0);
6710
6711 inputTensorInfo1.SetQuantizationScale(scale1);
6712 inputTensorInfo1.SetQuantizationOffset(offset1);
6713
6714 outputTensorInfo.SetQuantizationScale(outScale);
6715 outputTensorInfo.SetQuantizationOffset(outOffset);
6716
6717 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
6718 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
6719
6720 LayerTestResult<T, 4> result(outputTensorInfo);
6721 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00006722
surmeh01bceff2f2018-03-29 16:29:27 +01006723 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
telsoa014fcda012018-03-09 14:13:49 +00006724 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
telsoa014fcda012018-03-09 14:13:49 +00006725 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6726
6727 armnn::MultiplicationQueueDescriptor data;
6728 armnn::WorkloadInfo info;
surmeh01bceff2f2018-03-29 16:29:27 +01006729 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
6730 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
telsoa014fcda012018-03-09 14:13:49 +00006731 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
6732
6733 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
6734
surmeh01bceff2f2018-03-29 16:29:27 +01006735 inputHandle0->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00006736 inputHandle1->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00006737 outputHandle->Allocate();
6738
surmeh01bceff2f2018-03-29 16:29:27 +01006739 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00006740 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00006741
6742 workload->Execute();
6743
6744 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6745
6746 return result;
6747}
surmeh01bceff2f2018-03-29 16:29:27 +01006748} // anonymous namespace
6749
// Tests QAsymm8 element-wise multiplication, with quantization parameters
// deliberately chosen so that some products exceed the representable output
// range and saturate to 255.
LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;
    const unsigned int shape[] = { batchSize, channels, height, width };

    // See dequantized values to the right: (q - 1) * 4.
    std::vector<uint8_t> input0({
         62,  37,   3, 172,  13, 111, // 244, 144,   8, 684,  48, 440,
        188,  20,  73,  31,  23,  31  // 748,  76, 288, 120,  88, 120
    });

    // See dequantized values to the right: (q + 2) * 3.
    std::vector<uint8_t> input1({
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
         48, 115, 151,  79,  78,  97  // 150, 351, 459, 243, 240, 297
    });

    // See dequantized values to the right (products of the two inputs).
    std::vector<uint8_t> output(
    {
         64,  72,  0, 255,  8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
         77,  15, 92,  16, 10,  21, // 112200,  26676, 132192, 29160, 21120, 35640
    });

    // Scale/offset chosen to have output values out of range.
    return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
                                                                              memoryManager,
                                                                              shape,
                                                                              input0,
                                                                              4.0f,
                                                                              1,
                                                                              shape,
                                                                              input1,
                                                                              3.0f,
                                                                              -2,
                                                                              shape,
                                                                              output,
                                                                              1366.255f,
                                                                              -5);
}
6795
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006796LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
6797 armnn::IWorkloadFactory& workloadFactory,
6798 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006799{
6800 const unsigned int shape0[] = { 1, 2, 2, 3 };
6801 const unsigned int shape1[] = { 1, 1, 1, 1 };
6802
6803 std::vector<uint8_t> input0({
6804 1, 2, 3, 4, 5, 6,
6805 7, 8, 9, 10, 11, 12
6806 });
6807
6808 std::vector<uint8_t> input1({2});
6809
6810 std::vector<uint8_t> output({
6811 2, 4, 6, 8, 10, 12,
6812 14, 16, 18, 20, 22, 24
6813 });
6814
Sadik Armagan2999a022019-04-09 14:20:12 +01006815 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
6816 memoryManager,
6817 shape0,
6818 input0,
6819 1.0f,
6820 0,
6821 shape1,
6822 input1,
6823 1.0f,
6824 0,
6825 shape0,
6826 output,
6827 1.0f,
6828 0);
surmeh01bceff2f2018-03-29 16:29:27 +01006829}
6830
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006831LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
6832 armnn::IWorkloadFactory& workloadFactory,
6833 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006834{
6835 const unsigned int shape0[] = { 1, 2, 2, 3 };
6836 const unsigned int shape1[] = { 1, 1, 1, 3 };
6837
6838 std::vector<uint8_t> input0({
6839 1, 2, 3, 4, 5, 6,
6840 7, 8, 9, 10, 11, 12
6841 });
6842
6843 std::vector<uint8_t> input1({1, 2, 3});
6844
6845 std::vector<uint8_t> output({
6846 1, 4, 9, 4, 10, 18,
6847 7, 16, 27, 10, 22, 36
6848 });
6849
Sadik Armagan2999a022019-04-09 14:20:12 +01006850 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
6851 memoryManager,
6852 shape0,
6853 input0,
6854 1.0f,
6855 0,
6856 shape1,
6857 input1,
6858 1.0f,
6859 0,
6860 shape0,
6861 output,
6862 1.0f,
6863 0);
6864}
6865
6866LayerTestResult<int16_t, 4> MultiplicationInt16Test(
6867 armnn::IWorkloadFactory& workloadFactory,
6868 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6869{
6870 const unsigned int shape[] = { 1, 2, 2, 3 };
6871
6872 std::vector<int16_t> input0(
6873 {
6874 6, 7, 8, 9, 10, 11,
6875 12, 13, 14, 15, 16, 17
6876 });
6877
6878 std::vector<int16_t> input1(
6879 {
6880 1, 2, 3, 4, 5, 6,
6881 7, 8, 9, 10, 11, 12
6882 });
6883
6884 std::vector<int16_t> output(
6885 {
6886 6, 14, 24, 36, 50, 66,
6887 84, 104, 126, 150, 176, 204
6888 });
6889
6890 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
6891 memoryManager,
6892 shape,
6893 input0,
6894 1.0f,
6895 0,
6896 shape,
6897 input1,
6898 1.0f,
6899 0,
6900 shape,
6901 output,
6902 1.0f,
6903 0);
6904}
6905
6906LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
6907 armnn::IWorkloadFactory& workloadFactory,
6908 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6909{
6910 const unsigned int shape0[] = { 1, 2, 2, 3 };
6911 const unsigned int shape1[] = { 1, 1, 1, 1 };
6912
6913 std::vector<int16_t> input0(
6914 {
6915 1, 2, 3, 4, 5, 6,
6916 7, 8, 9, 10, 11, 12
6917 });
6918
6919 std::vector<int16_t> input1({2});
6920
6921 std::vector<int16_t> output(
6922 {
6923 2, 4, 6, 8, 10, 12,
6924 14, 16, 18, 20, 22, 24
6925 });
6926
6927 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
6928 memoryManager,
6929 shape0,
6930 input0,
6931 1.0f,
6932 0,
6933 shape1,
6934 input1,
6935 1.0f,
6936 0,
6937 shape0,
6938 output,
6939 1.0f,
6940 0);
6941}
6942
6943LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
6944 armnn::IWorkloadFactory& workloadFactory,
6945 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6946{
6947 const unsigned int shape0[] = { 1, 2, 2, 3 };
6948 const unsigned int shape1[] = { 1, 1, 1, 3 };
6949
6950 std::vector<int16_t> input0(
6951 {
6952 1, 2, 3, 4, 5, 6,
6953 7, 8, 9, 10, 11, 12
6954 });
6955
6956 std::vector<int16_t> input1({1, 2, 3});
6957
6958 std::vector<int16_t> output(
6959 {
6960 1, 4, 9, 4, 10, 18,
6961 7, 16, 27, 10, 22, 36
6962 });
6963
6964 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
6965 memoryManager,
6966 shape0,
6967 input0,
6968 1.0f,
6969 0,
6970 shape1,
6971 input1,
6972 1.0f,
6973 0,
6974 shape0,
6975 output,
6976 1.0f,
6977 0);
surmeh01bceff2f2018-03-29 16:29:27 +01006978}
telsoa014fcda012018-03-09 14:13:49 +00006979
namespace
{
// Shared driver for all Subtraction layer tests in this file.
//
// Builds two 4-D input tensors (shape0 and shape1 may differ so broadcast
// behaviour can be exercised) with per-tensor quantization parameters, runs
// a Subtraction workload created by the supplied factory, and returns a
// LayerTestResult holding both the actual output and the caller-provided
// expected output.
//
// ArmnnType selects the data type under test (Float32 callers simply pass
// scale 1.0 / offset 0); T is the matching C++ element type.
//
// NOTE(review): memoryManager is accepted but not referenced in this body.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Describe both inputs and the output, including quantization info.
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    // Wrap the raw values in test tensors and record the expected output.
    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    // Backend-specific tensor handles for the workload's inputs/output.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Bind the handles to a Subtraction queue descriptor (two inputs, one
    // output) and create the workload.
    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    // Allocate backing memory for the handles (after workload creation, as
    // done throughout this file) and upload the input data.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // Run the layer and read back the computed output for comparison.
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7044
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007045LayerTestResult<uint8_t, 4> SubtractionUint8Test(
7046 armnn::IWorkloadFactory& workloadFactory,
7047 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007048{
7049 const unsigned int shape0[] = { 1, 1, 2, 2 };
7050 const unsigned int shape1[] = { 1, 1, 2, 2 };
7051
7052 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7053 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
7054 std::vector<uint8_t> output({ 3, 3, 5, 5 });
7055
Sadik Armagan2999a022019-04-09 14:20:12 +01007056 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7057 memoryManager,
7058 shape0, input0, 0.5f, 2,
7059 shape1, input1, 1.0f, 0,
7060 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007061}
7062
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007063LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
7064 armnn::IWorkloadFactory& workloadFactory,
7065 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007066{
7067 const unsigned int shape0[] = { 1, 1, 2, 2 };
7068 const unsigned int shape1[] = { 1, 1, 1, 1 };
7069
7070 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7071 std::vector<uint8_t> input1({ 2 });
7072 std::vector<uint8_t> output({ 5, 6, 7, 8 });
7073
Sadik Armagan2999a022019-04-09 14:20:12 +01007074 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7075 memoryManager,
7076 shape0, input0, 0.5f, 2,
7077 shape1, input1, 1.0f, 0,
7078 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01007079}
7080
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007081LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
7082 armnn::IWorkloadFactory& workloadFactory,
7083 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007084{
7085 const unsigned int shape0[] = { 1, 1, 2, 2 };
7086 const unsigned int shape1[] = { 1, 1, 2, 1 };
7087
7088 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7089 std::vector<uint8_t> input1({ 2, 1 });
7090 std::vector<uint8_t> output({ 8, 11, 12, 15 });
7091
Sadik Armagan2999a022019-04-09 14:20:12 +01007092 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7093 memoryManager,
7094 shape0, input0, 1.0f, 0,
7095 shape1, input1, 1.0f, 0,
7096 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007097}
7098
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007099LayerTestResult<float, 4> SubtractionTest(
7100 armnn::IWorkloadFactory& workloadFactory,
7101 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007102{
7103 const unsigned int shape0[] = { 1, 1, 2, 2 };
7104 const unsigned int shape1[] = { 1, 1, 2, 2 };
7105
7106 std::vector<float> input0({ 1, 2, 3, 4 });
7107 std::vector<float> input1({ 1, -1, 0, 2 });
7108 std::vector<float> output({ 0, 3, 3, 2 });
7109
Sadik Armagan2999a022019-04-09 14:20:12 +01007110 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7111 memoryManager,
7112 shape0, input0, 1.0f, 0,
7113 shape1, input1, 1.0f, 0,
7114 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007115}
7116
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007117LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
7118 armnn::IWorkloadFactory& workloadFactory,
7119 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007120{
7121 const unsigned int shape0[] = { 1, 1, 2, 2 };
7122 const unsigned int shape1[] = { 1, 1, 1, 1 };
7123
7124 std::vector<float> input0({ 1, 2, 3, 4 });
7125 std::vector<float> input1({ 10 });
7126 std::vector<float> output({ -9, -8, -7, -6 });
7127
Sadik Armagan2999a022019-04-09 14:20:12 +01007128 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7129 memoryManager,
7130 shape0, input0, 1.0f, 0,
7131 shape1, input1, 1.0f, 0,
7132 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007133}
7134
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007135LayerTestResult<float, 4> SubtractionBroadcastTest(
7136 armnn::IWorkloadFactory& workloadFactory,
7137 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007138{
7139 const unsigned int shape0[] = { 1, 1, 2, 2 };
7140 const unsigned int shape1[] = { 1, 1, 1, 2 };
7141
7142 std::vector<float> input0({ 1, 2, 3, 4 });
7143 std::vector<float> input1({ 10, -5 });
7144 std::vector<float> output({ -9, 7, -7, 9 });
7145
Sadik Armagan2999a022019-04-09 14:20:12 +01007146 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7147 memoryManager,
7148 shape0, input0, 1.0f, 0,
7149 shape1, input1, 1.0f, 0,
7150 shape0, output, 1.0f, 0);
7151}
7152
7153LayerTestResult<int16_t, 4> SubtractionInt16Test(
7154 armnn::IWorkloadFactory& workloadFactory,
7155 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7156{
7157 const unsigned int shape0[] = { 1, 1, 2, 2 };
7158 const unsigned int shape1[] = { 1, 1, 2, 2 };
7159
7160 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7161 std::vector<int16_t> input1({ 1, 2, 1, 2 });
7162 std::vector<int16_t> output({ 3, 3, 5, 5 });
7163
7164 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7165 memoryManager,
7166 shape0, input0, 0.5f, 0,
7167 shape1, input1, 1.0f, 0,
7168 shape0, output, 1.0f, 0);
7169}
7170
7171LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
7172 armnn::IWorkloadFactory& workloadFactory,
7173 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7174{
7175 const unsigned int shape0[] = { 1, 1, 2, 2 };
7176 const unsigned int shape1[] = { 1, 1, 1, 1 };
7177
7178 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7179 std::vector<int16_t> input1({ 2 });
7180 std::vector<int16_t> output({ 3, 4, 5, 6 });
7181
7182 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7183 memoryManager,
7184 shape0, input0, 0.5f, 0,
7185 shape1, input1, 1.0f, 0,
7186 shape0, output, 1.0f, 0);
7187}
7188
7189LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
7190 armnn::IWorkloadFactory& workloadFactory,
7191 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7192{
7193 const unsigned int shape0[] = { 1, 1, 2, 2 };
7194 const unsigned int shape1[] = { 1, 1, 2, 1 };
7195
7196 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7197 std::vector<int16_t> input1({ 2, 1 });
7198 std::vector<int16_t> output({ 8, 11, 12, 15 });
7199
7200 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7201 memoryManager,
7202 shape0, input0, 1.0f, 0,
7203 shape1, input1, 1.0f, 0,
7204 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007205}
7206
// "Nop" resize: the output dimensions equal the input dimensions, so the
// bilinear resize should pass the quantized data through unchanged (the
// expected output is the input tensor itself).
LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Output is deliberately the same size as the input.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // NCHW tensors; input and output share the same quantization parameters,
    // so a pass-through produces byte-identical data.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    // Expected output == input (identity resize).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build and run the ResizeBilinear workload, then read back the result.
    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7260
// Minifies a 2x2 quantized input down to a single 1x1 output element.
LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Halve both spatial dimensions: 2x2 -> 1x1.
    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Input and output share the same quantization parameters.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build and run the ResizeBilinear workload, then read back the result.
    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7319
// Minifies a square 4x4 input to 2x2. With a stride of exactly 2, sampling
// lands on source texels, so the expected values are the input's
// even-row/even-column entries (1, 3, 3, 5) with no interpolation blending.
LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Halve both spatial dimensions: 4x4 -> 2x2.
    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Input and output share the same quantization parameters.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    // Rows 0 and 2, columns 0 and 2 of the input.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build and run the ResizeBilinear workload, then read back the result.
    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7376
// Minifies a non-square 3x2 input to 2x1, exercising a non-integer
// horizontal stride (1.5). The dequantized values (scale 1.5, offset -1)
// are shown in the data comments; the second output element interpolates
// between columns 1 and 2 of the top row.
LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Input and output share the same quantization parameters.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    // 3.0 sampled at (0,0); 5.25 is the midpoint of 4.5 and 6.0 (x = 1.5).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build and run the ResizeBilinear workload, then read back the result.
    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7431
// Magnifies a 3x2 input to 3x5 (width 2 -> 5), with different quantization
// parameters on input and output, so the expected values combine the
// interpolation with a requantization step. Dequantized values appear in
// the data comments.
LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Note: input and output quantization parameters deliberately differ.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228, // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71 // 2.400595, 0.68896
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build and run the ResizeBilinear workload, then read back the result.
    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7488
// Shared driver for the 2-D Rsqrt (reciprocal square root) tests: runs an
// Rsqrt workload over inputValues and returns the actual output alongside
// expectedOutputValues for comparison by the caller.
//
// NOTE(review): the TensorInfo parameters are passed by value (const copy),
// and memoryManager is accepted but not referenced in this body.
LayerTestResult<float, 2> Rsqrt2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo inputTensorInfo,
    const armnn::TensorInfo outputTensorInfo,
    std::vector<float> inputValues,
    std::vector<float> expectedOutputValues)
{
    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, std::vector<float>(inputValues));

    // Record the caller-supplied expected output next to the actual output.
    LayerTestResult<float, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the Rsqrt workload: one input, one output.
    armnn::RsqrtQueueDescriptor descriptor;

    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);

    // Allocate handle memory, upload inputs, execute, and read back.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
7525LayerTestResult<float, 2> Rsqrt2dTest(
7526 armnn::IWorkloadFactory& workloadFactory,
7527 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7528{
7529 const armnn::TensorShape inputShape{ 2, 2 };
7530 const armnn::TensorShape outputShape{ 2, 2 };
7531
7532 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7533 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7534
7535 std::vector<float> inputValues
7536 {
7537 1.f, 4.f,
7538 16.f, 25.f
7539 };
7540
7541 std::vector<float> expectedOutputValues
7542 {
7543 1.f, 0.5f,
7544 0.25f, 0.2f
7545 };
7546
7547 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7548 inputTensorInfo, outputTensorInfo,
7549 inputValues, expectedOutputValues);
7550}
7551
// Rsqrt over a 3-D (3x1x2) tensor of perfect squares; unlike the 2-D tests
// this builds and runs the workload inline because Rsqrt2dTestCommon is
// fixed to rank-2 tensors.
LayerTestResult<float, 3> Rsqrt3dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const armnn::TensorShape inputShape{ 3, 1, 2 };
    const armnn::TensorShape outputShape{ 3, 1, 2 };

    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues
    {
        1.f, 4.f, 16.f,
        25.f, 64.f, 100.f
    };

    // 1/sqrt(x) of each input value.
    std::vector<float> expectedOutputValues
    {
        1.f, 0.5f, 0.25f,
        0.2f, 0.125f, 0.1f
    };

    auto inputTensor = MakeTensor<float, 3>(inputTensorInfo, std::vector<float>(inputValues));

    LayerTestResult<float, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float >(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the Rsqrt workload: one input, one output.
    armnn::RsqrtQueueDescriptor descriptor;

    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);

    // Allocate handle memory, upload inputs, execute, and read back.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
7602
7603LayerTestResult<float, 2> RsqrtZeroTest(
7604 armnn::IWorkloadFactory& workloadFactory,
7605 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7606{
7607 const armnn::TensorShape inputShape{ 1, 2 };
7608 const armnn::TensorShape outputShape{ 1, 2 };
7609
7610 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7611 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7612
7613 std::vector<float> inputValues
7614 {
7615 0.f, -0.f
7616 };
7617
7618 std::vector<float> expectedOutputValues
7619 {
7620 INFINITY, -INFINITY
7621 };
7622
7623 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7624 inputTensorInfo, outputTensorInfo,
7625 inputValues, expectedOutputValues);
7626}
7627
7628LayerTestResult<float, 2> RsqrtNegativeTest(
7629 armnn::IWorkloadFactory& workloadFactory,
7630 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7631{
7632 const armnn::TensorShape inputShape{ 1, 2 };
7633 const armnn::TensorShape outputShape{ 1, 2 };
7634
7635 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7636 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7637
7638 std::vector<float> inputValues
7639 {
7640 -25.f, -16.f
7641 };
7642
7643 std::vector<float> expectedOutputValues
7644 {
7645 -NAN, -NAN
7646 };
7647
7648 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7649 inputTensorInfo, outputTensorInfo,
7650 inputValues, expectedOutputValues);
7651}
7652
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007653LayerTestResult<float, 4> BatchNormTest(
7654 armnn::IWorkloadFactory& workloadFactory,
7655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007656{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007657 // BatchSize: 1
7658 // Channels: 2
7659 // Height: 3
7660 // Width: 2
7661
7662 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
7663 std::vector<float> inputValues
7664 {
7665 // Batch 0, Channel 0, Height (3) x Width (2)
7666 1.f, 4.f,
7667 4.f, 2.f,
7668 1.f, 6.f,
7669
7670 // Batch 0, Channel 1, Height (3) x Width (2)
7671 1.f, 1.f,
7672 4.f, 1.f,
7673 -2.f, 4.f
7674 };
7675 std::vector<float> expectedOutputValues
7676 {
7677 // Batch 0, Channel 0, Height (3) x Width (2)
7678 1.f, 4.f,
7679 4.f, 2.f,
7680 1.f, 6.f,
7681
7682 // Batch 0, Channel 1, Height (3) x Width (2)
7683 3.f, 3.f,
7684 4.f, 3.f,
7685 2.f, 4.f
7686 };
7687
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007688 return BatchNormTestImpl<armnn::DataType::Float32>(
7689 workloadFactory, memoryManager,
7690 inputOutputShape, inputValues, expectedOutputValues,
7691 0.f, 0, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007692}
7693
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007694LayerTestResult<float, 4> BatchNormNhwcTest(
7695 armnn::IWorkloadFactory& workloadFactory,
7696 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007697{
7698 // BatchSize: 1
7699 // Height: 3
7700 // Width: 2
7701 // Channels: 2
7702
7703 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
7704 std::vector<float> inputValues
7705 {
7706 // Batch 0, Height 0, Width (2) x Channel (2)
7707 1.f, 1.f,
7708 4.f, 1.f,
7709
7710 // Batch 0, Height 1, Width (2) x Channel (2)
7711 4.f, 4.f,
7712 2.f, 1.f,
7713
7714 // Batch 0, Height 2, Width (2) x Channel (2)
7715 1.f, -2.f,
7716 6.f, 4.f
7717 };
7718 std::vector<float> expectedOutputValues
7719 {
7720 // Batch 0, Height 0, Width (2) x Channel (2)
7721 1.f, 3.f,
7722 4.f, 3.f,
7723
7724 // Batch 0, Height 1, Width (2) x Channel (2)
7725 4.f, 4.f,
7726 2.f, 3.f,
7727
7728 // Batch 0, Height 2, Width (2) x Channel (2)
7729 1.f, 2.f,
7730 6.f, 4.f
7731 };
7732
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007733 return BatchNormTestImpl<armnn::DataType::Float32>(
7734 workloadFactory, memoryManager,
7735 inputOutputShape, inputValues, expectedOutputValues,
7736 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00007737}
7738
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007739LayerTestResult<uint8_t, 4> BatchNormUint8Test(
7740 armnn::IWorkloadFactory& workloadFactory,
7741 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007742{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007743 // BatchSize: 1
7744 // Channels: 2
7745 // Height: 3
7746 // Width: 2
7747
7748 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
7749 std::vector<float> inputValues
7750 {
7751 // Batch 0, Channel 0, Height (3) x Width (2)
7752 1.f, 4.f,
7753 4.f, 2.f,
7754 1.f, 6.f,
7755
7756 // Batch 0, Channel 1, Height (3) x Width (2)
7757 1.f, 1.f,
7758 4.f, 1.f,
7759 -2.f, 4.f
7760 };
7761 std::vector<float> expectedOutputValues
7762 {
7763 // Batch 0, Channel 0, Height (3) x Width (2)
7764 1.f, 4.f,
7765 4.f, 2.f,
7766 1.f, 6.f,
7767
7768 // Batch 0, Channel 1, Height (3) x Width (2)
7769 3.f, 3.f,
7770 4.f, 3.f,
7771 2.f, 4.f
7772 };
7773
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007774 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
7775 workloadFactory, memoryManager,
7776 inputOutputShape, inputValues, expectedOutputValues,
7777 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007778}
7779
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007780LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
7781 armnn::IWorkloadFactory& workloadFactory,
7782 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007783{
7784 // BatchSize: 1
7785 // Height: 3
7786 // Width: 2
7787 // Channels: 2
7788
7789 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
7790 std::vector<float> inputValues
7791 {
7792 // Batch 0, Height 0, Width (2) x Channel (2)
7793 1.f, 1.f,
7794 4.f, 1.f,
7795
7796 // Batch 0, Height 1, Width (2) x Channel (2)
7797 4.f, 4.f,
7798 2.f, 1.f,
7799
7800 // Batch 0, Height 2, Width (2) x Channel (2)
7801 1.f, -2.f,
7802 6.f, 4.f
7803 };
7804 std::vector<float> expectedOutputValues
7805 {
7806 // Batch 0, Height 0, Width (2) x Channel (2)
7807 1.f, 3.f,
7808 4.f, 3.f,
7809
7810 // Batch 0, Height 1, Width (2) x Channel (2)
7811 4.f, 4.f,
7812 2.f, 3.f,
7813
7814 // Batch 0, Height 2, Width (2) x Channel (2)
7815 1.f, 2.f,
7816 6.f, 4.f
7817 };
7818
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007819 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
7820 (workloadFactory, memoryManager,
7821 inputOutputShape, inputValues, expectedOutputValues,
7822 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00007823}
7824
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007825LayerTestResult<uint8_t, 4> ConstantUint8Test(
7826 armnn::IWorkloadFactory& workloadFactory,
7827 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007828{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007829 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00007830}
7831
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007832LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
7833 armnn::IWorkloadFactory& workloadFactory,
7834 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007835{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007836 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007837}
7838
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007839LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
7840 armnn::IWorkloadFactory& workloadFactory,
7841 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007842{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007843 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007844}
7845
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007846LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
7847 armnn::IWorkloadFactory& workloadFactory,
7848 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007849{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007850 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007851}
7852
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007853LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
7854 armnn::IWorkloadFactory& workloadFactory,
7855 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007856{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007857 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
7858 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007859}
7860
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007861LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
7862 armnn::IWorkloadFactory& workloadFactory,
7863 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007864{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007865 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
7866 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007867}
7868
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007869LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
7870 armnn::IWorkloadFactory& workloadFactory,
7871 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007872{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007873 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007874}
7875
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007876LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
7877 armnn::IWorkloadFactory& workloadFactory,
7878 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007879{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007880 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007881}
7882
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007883LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
7884 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00007885 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7886 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00007887{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007888 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
7889 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007890}
7891
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007892LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
7893 armnn::IWorkloadFactory& workloadFactory,
7894 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007895{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007896 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007897}
7898
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007899LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
7900 armnn::IWorkloadFactory& workloadFactory,
7901 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007902{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007903 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
7904 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00007905}
7906
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007907LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
7908 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00007909 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7910 bool useSubtensor)
7911{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007912 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
7913 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007914}
7915
7916LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
7917 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007918 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007919{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007920 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007921}
7922
7923LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
7924 armnn::IWorkloadFactory& workloadFactory,
7925 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7926{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007927 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007928}
7929
7930LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
7931 armnn::IWorkloadFactory& workloadFactory,
7932 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7933{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007934 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007935}
7936
7937LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
7938 armnn::IWorkloadFactory& workloadFactory,
7939 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
7940{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007941 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
7942 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00007943}
7944
7945LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
7946 armnn::IWorkloadFactory& workloadFactory,
7947 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7948{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007949 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
7950 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007951}
7952
7953LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
7954 armnn::IWorkloadFactory& workloadFactory,
7955 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7956{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007957 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
7958 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007959}
7960
7961LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
7962 armnn::IWorkloadFactory& workloadFactory,
7963 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7964{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007965 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
7966 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00007967}
7968
7969LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
7970 armnn::IWorkloadFactory& workloadFactory,
7971 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7972 bool useSubtensor)
7973{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007974 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
7975 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00007976}
7977
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007978LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
7979 armnn::IWorkloadFactory& workloadFactory,
7980 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7981 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00007982{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007983 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
7984 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00007985}
7986
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007987LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
7988 armnn::IWorkloadFactory& workloadFactory,
7989 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7990 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00007991{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007992 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007993 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00007994}
7995
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007996LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
7997 armnn::IWorkloadFactory& workloadFactory,
7998 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7999 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008000{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008001 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
8002 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008003}
8004
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008005LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
8006 armnn::IWorkloadFactory& workloadFactory,
8007 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8008 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008009{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008010 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008011 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008012}
8013
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008014LayerTestResult<float, 4> SimpleMaxPooling2dTest(
8015 armnn::IWorkloadFactory& workloadFactory,
8016 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008017 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008018{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008019 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008020}
8021
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008022LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
8023 armnn::IWorkloadFactory& workloadFactory,
8024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008025 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01008026{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008027 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01008028}
8029
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008030LayerTestResult<float, 4> SimpleAveragePooling2dTest(
8031 armnn::IWorkloadFactory& workloadFactory,
8032 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008033 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008034{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008035 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01008036}
8037
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008038LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
8039 armnn::IWorkloadFactory& workloadFactory,
8040 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008041 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01008042{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008043 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008044 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008045}
8046
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008047LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
8048 armnn::IWorkloadFactory& workloadFactory,
8049 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8050 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01008051{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008052 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008053 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01008054}
8055
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008056LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
8057 armnn::IWorkloadFactory& workloadFactory,
8058 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008059{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008060 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008061}
8062
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008063LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
8064 armnn::IWorkloadFactory& workloadFactory,
8065 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008066{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008067 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8068 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008069}
8070
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008071LayerTestResult<float, 4> SimpleL2Pooling2dTest(
8072 armnn::IWorkloadFactory& workloadFactory,
8073 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008074 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008075{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008076 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008077}
8078
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008079LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
8080 armnn::IWorkloadFactory& workloadFactory,
8081 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008082 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008083{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008084 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008085}
8086
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008087LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
8088 armnn::IWorkloadFactory& workloadFactory,
8089 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008090{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008091 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008092}
8093
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008094LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
8095 armnn::IWorkloadFactory& workloadFactory,
8096 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008097{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008098 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008099}
8100
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008101LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
8102 armnn::IWorkloadFactory& workloadFactory,
8103 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008104{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008105 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008106}
8107
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008108LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
8109 armnn::IWorkloadFactory& workloadFactory,
8110 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008111{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008112 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008113}
8114
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008115LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
8116 armnn::IWorkloadFactory& workloadFactory,
8117 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008118{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008119 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008120}
8121
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008122LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
8123 armnn::IWorkloadFactory& workloadFactory,
8124 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008125{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008126 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008127}
8128
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008129LayerTestResult<float, 4> L2Pooling2dSize7Test(
8130 armnn::IWorkloadFactory& workloadFactory,
8131 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008132{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008133 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008134}
8135
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008136LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
8137 armnn::IWorkloadFactory& workloadFactory,
8138 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008139{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008140 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008141}
8142
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008143LayerTestResult<float, 4> L2Pooling2dSize9Test(
8144 armnn::IWorkloadFactory& workloadFactory,
8145 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008146{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008147 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008148}
8149
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008150LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
8151 armnn::IWorkloadFactory& workloadFactory,
8152 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008153{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008154 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008155}
8156
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008157LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
8158 armnn::IWorkloadFactory& workloadFactory,
8159 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008160{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008161 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008162}
8163
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008164LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
8165 armnn::IWorkloadFactory& workloadFactory,
8166 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008167{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008168 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008169}
8170
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008171LayerTestResult<float, 4> ComparePooling2dTest(
8172 armnn::IWorkloadFactory& workloadFactory,
8173 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8174 armnn::IWorkloadFactory& refWorkloadFactory,
8175 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008176{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008177 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008178 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00008179}
8180
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008181LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
8182 armnn::IWorkloadFactory& workloadFactory,
8183 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8184 armnn::IWorkloadFactory& refWorkloadFactory,
8185 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008186{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008187 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008188 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008189}
8190
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008191LayerTestResult<float, 2> FullyConnectedLargeTest(
8192 armnn::IWorkloadFactory& workloadFactory,
8193 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8194 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00008195{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008196 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00008197}
8198
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008199LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
8200 armnn::IWorkloadFactory& workloadFactory,
8201 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008202{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008203 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008204}
8205
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008206LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
8207 armnn::IWorkloadFactory& workloadFactory,
8208 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008209{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008210 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8211 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008212}
8213
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008214LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
8215 armnn::IWorkloadFactory& workloadFactory,
8216 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008217{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008218 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008219}
8220
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008221LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
8222 armnn::IWorkloadFactory& workloadFactory,
8223 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008224{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008225 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8226 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008227}
8228
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008229LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
8230 armnn::IWorkloadFactory& workloadFactory,
8231 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008232{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008233 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008234}
8235
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008236LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
8237 armnn::IWorkloadFactory& workloadFactory,
8238 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008239{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008240 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8241 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008242}
8243
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008244LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
8245 armnn::IWorkloadFactory& workloadFactory,
8246 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008247{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008248 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
8249 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008250}
8251
8252LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008253 armnn::IWorkloadFactory& workloadFactory,
8254 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008255{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008256 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
8257 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008258}
8259
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008260LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
8261 armnn::IWorkloadFactory& workloadFactory,
8262 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008263{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008264 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008265}
8266
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008267LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
8268 armnn::IWorkloadFactory& workloadFactory,
8269 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008270{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008271 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8272 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008273}
8274
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008275LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
8276 armnn::IWorkloadFactory& workloadFactory,
8277 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008278{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008279 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008280}
8281
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008282LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
8283 armnn::IWorkloadFactory& workloadFactory,
8284 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008285{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008286 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008287}
8288
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008289LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
8290 armnn::IWorkloadFactory& workloadFactory,
8291 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008292{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008293 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008294}
8295
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008296LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
8297 armnn::IWorkloadFactory& workloadFactory,
8298 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008299{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008300 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008301}
8302
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008303LayerTestResult<float, 4> SimplePermuteFloat32Test(
8304 armnn::IWorkloadFactory& workloadFactory,
8305 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008306{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008307 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008308};
8309
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008310LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
8311 armnn::IWorkloadFactory& workloadFactory,
8312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008313{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008314 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008315};
surmeh01bceff2f2018-03-29 16:29:27 +01008316
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008317LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
8318 armnn::IWorkloadFactory& workloadFactory,
8319 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008320{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008321 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008322};
8323
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008324LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
8325 armnn::IWorkloadFactory& workloadFactory,
8326 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008327{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008328 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008329};
8330
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008331LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
8332 armnn::IWorkloadFactory& workloadFactory,
8333 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008334{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008335 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01008336};
8337
namespace
{

// Shared driver for the Mean-layer tests below: builds a Mean workload from
// workloadFactory, feeds it inputData, executes it once, and returns a
// LayerTestResult holding both the actual output and the expected outputData.
//
// T         - element type; uint8_t selects QuantisedAsymm8, any other T Float32.
// InputDim  - rank of the input tensor; inputShape must point at InputDim extents.
// OutputDim - rank of the output tensor; outputShape must point at OutputDim extents.
// axis      - reduction axes, copied into MeanQueueDescriptor's m_Axis.
// keepDims  - whether reduced dimensions are retained with size 1 (m_KeepDims).
// scale, offset - quantisation info applied identically to input and output;
//                 the defaults (1.0f, 0) leave float32 tests unaffected.
//
// NOTE(review): memoryManager is accepted for signature consistency with the
// other helpers in this file but is never used here - confirm intentional.
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int* inputShape,
    const std::vector<T>& inputData,
    const std::vector<unsigned int>& axis,
    bool keepDims,
    const unsigned int* outputShape,
    const std::vector<T>& outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // uint8_t element type implies quantised tensors; everything else runs as float32.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    // Same quantisation parameters on both ends so quantised values compare directly.
    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    // Pre-load the expected output; result.output is filled after execution.
    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the Mean operation and bind the handles to the workload.
    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    // Allocate must precede the data copies and execution below.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace
8395
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008396LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
8397 armnn::IWorkloadFactory& workloadFactory,
8398 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008399{
8400 const unsigned int inputShape[] = { 3, 2 };
8401 const unsigned int outputShape[] = { 1 };
8402
8403 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8404 std::vector<uint8_t> output({ 2 });
8405
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008406 return MeanTestHelper<uint8_t, 2, 1>(
8407 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008408}
8409
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008410LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(
8411 armnn::IWorkloadFactory& workloadFactory,
8412 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008413{
8414 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8415 const unsigned int outputShape[] = { 1, 1, 2 };
8416
8417 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8418 std::vector<uint8_t> output({ 2, 2 });
8419
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008420 return MeanTestHelper<uint8_t, 4, 3>(
8421 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008422}
8423
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008424LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(
8425 armnn::IWorkloadFactory& workloadFactory,
8426 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008427{
8428 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8429 const unsigned int outputShape[] = { 1, 1, 1, 2 };
8430
8431 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8432 std::vector<uint8_t> output({ 2, 2 });
8433
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008434 return MeanTestHelper<uint8_t, 4, 4>(
8435 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008436}
8437
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008438LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(
8439 armnn::IWorkloadFactory& workloadFactory,
8440 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008441{
8442 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8443 const unsigned int outputShape[] = { 1, 3, 1, 1 };
8444
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008445 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
narpra011e4c31d2018-09-28 11:07:51 +01008446 std::vector<uint8_t> output({ 1, 3, 5 });
8447
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008448 return MeanTestHelper<uint8_t, 4, 4>(
8449 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008450}
8451
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008452LayerTestResult<uint8_t, 1> MeanVtsUint8Test(
8453 armnn::IWorkloadFactory& workloadFactory,
8454 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008455{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008456 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01008457 const unsigned int outputShape[] = { 2 };
8458
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008459 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
8460 24 });
8461 std::vector<uint8_t> output({ 12, 13 });
narpra011e4c31d2018-09-28 11:07:51 +01008462
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008463 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, memoryManager,
8464 inputShape, input, { 0, 1 }, false, outputShape,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008465 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01008466}
8467
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008468LayerTestResult<float, 1> MeanFloatSimpleTest(
8469 armnn::IWorkloadFactory& workloadFactory,
8470 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008471{
8472 const unsigned int inputShape[] = { 3, 2 };
8473 const unsigned int outputShape[] = { 1 };
8474
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008475 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8476 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008477
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008478 return MeanTestHelper<float, 2, 1>(
8479 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008480}
8481
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008482LayerTestResult<float, 3> MeanFloatSimpleAxisTest(
8483 armnn::IWorkloadFactory& workloadFactory,
8484 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008485{
8486 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8487 const unsigned int outputShape[] = { 3, 1, 2 };
8488
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008489 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
8490 std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008491
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008492 return MeanTestHelper<float, 4, 3>(
8493 workloadFactory, memoryManager, inputShape, input, { 0 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008494}
8495
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008496LayerTestResult<float, 4> MeanFloatKeepDimsTest(
8497 armnn::IWorkloadFactory& workloadFactory,
8498 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008499{
8500 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8501 const unsigned int outputShape[] = { 1, 1, 1, 2 };
8502
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008503 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8504 std::vector<float> output({ 2.0f, 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008505
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008506 return MeanTestHelper<float, 4, 4>(
8507 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008508}
8509
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008510LayerTestResult<float, 4> MeanFloatMultipleDimsTest(
8511 armnn::IWorkloadFactory& workloadFactory,
8512 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008513{
8514 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8515 const unsigned int outputShape[] = { 1, 3, 1, 1 };
8516
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008517 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
8518 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008519
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008520 return MeanTestHelper<float, 4, 4>(
8521 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008522}
8523
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008524LayerTestResult<float, 1> MeanVtsFloat1Test(
8525 armnn::IWorkloadFactory& workloadFactory,
8526 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008527{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008528 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01008529 const unsigned int outputShape[] = { 2 };
8530
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008531 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8532 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8533 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008534
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008535 return MeanTestHelper<float, 3, 1>(
8536 workloadFactory, memoryManager, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008537}
8538
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008539LayerTestResult<float, 3> MeanVtsFloat2Test(
8540 armnn::IWorkloadFactory& workloadFactory,
8541 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008542{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008543 const unsigned int inputShape[] = { 4, 3, 2 };
8544 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01008545
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008546 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8547 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8548 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008549
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008550 return MeanTestHelper<float, 3, 3>(
8551 workloadFactory, memoryManager, inputShape, input, { 0, 2 }, true, outputShape, output);
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008552}
8553
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008554LayerTestResult<float, 3> MeanVtsFloat3Test(
8555 armnn::IWorkloadFactory& workloadFactory,
8556 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008557{
8558 const unsigned int inputShape[] = { 1, 2, 2, 1 };
8559 const unsigned int outputShape[] = { 1, 2, 1 };
8560
8561 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
8562 std::vector<float> output({ 1.5f, 3.5f });
8563
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008564 return MeanTestHelper<float, 4, 3>(
8565 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008566}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01008567
// Chains two workloads built from the same factory: a 1x1 / stride-2 max pool
// over a 1x1x3x3 float32 input, followed by an element-wise addition of the
// pooled result with a second 1x1x2x2 tensor. Exercises passing data between
// workloads via their tensor handles; returns the addition result together
// with the hand-computed expected output.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                                    {1, 2, 3,
                                                                     4, 5, 6,
                                                                     7, 8, 9
                                                                    });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Scratch buffer shaped like the pooling output, used for the handle
    // round-trip below.
    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                                {12, 16,
                                                                 24, 28,
                                                                });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): this reads poolingOutputHandle *before* workload->Execute()
    // has run and writes the same values straight back. It appears harmless
    // because Execute() below overwrites the handle before the addition
    // consumes it, but the round-trip looks redundant - confirm intent.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Pooling must run before the addition, which reads poolingOutputHandle.
    workload->Execute();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008670
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008671LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
8672 armnn::IWorkloadFactory& workloadFactory,
8673 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008674{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008675 return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008676}
8677
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008678LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
8679 armnn::IWorkloadFactory& workloadFactory,
8680 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008681{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008682 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008683}
8684
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008685LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
8686 armnn::IWorkloadFactory& workloadFactory,
8687 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008688{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008689 return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008690}
8691
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008692LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
8693 armnn::IWorkloadFactory& workloadFactory,
8694 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008695{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008696 return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008697}
8698
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008699LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
8700 armnn::IWorkloadFactory& workloadFactory,
8701 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008702{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008703 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008704}
8705
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008706LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
8707 armnn::IWorkloadFactory& workloadFactory,
8708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008709{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008710 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008711}
8712
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008713LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
8714 armnn::IWorkloadFactory& workloadFactory,
8715 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008716{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008717 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008718}
8719
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008720LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
8721 armnn::IWorkloadFactory& workloadFactory,
8722 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008723{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008724 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008725}
8726
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008727LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
8728 armnn::IWorkloadFactory& workloadFactory,
8729 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008730{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008731 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008732}
8733
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008734LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
8735 armnn::IWorkloadFactory& workloadFactory,
8736 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008737{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008738 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008739}
8740
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008741LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
8742 armnn::IWorkloadFactory& workloadFactory,
8743 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008744{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008745 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008746}
8747
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008748LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
8749 armnn::IWorkloadFactory& workloadFactory,
8750 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008751{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008752 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008753}
8754
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008755LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
8756 armnn::IWorkloadFactory& workloadFactory,
8757 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008758{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008759 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008760}
8761
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008762LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
8763 armnn::IWorkloadFactory& workloadFactory,
8764 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008765{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008766 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008767}
8768
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008769LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
8770 armnn::IWorkloadFactory& workloadFactory,
8771 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008772{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008773 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008774}
8775
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008776LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
8777 armnn::IWorkloadFactory& workloadFactory,
8778 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008779{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008780 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008781}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008782
8783namespace {
8784
8785template<typename T, std::size_t InputDim, std::size_t OutputDim>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008786LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
8787 armnn::IWorkloadFactory &workloadFactory,
8788 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8789 const armnn::DataLayout& dataLayout,
8790 const unsigned int *inputShape,
8791 const std::vector<T> &inputData,
8792 const std::vector<unsigned int> &blockShape,
8793 const std::vector<std::pair<unsigned int, unsigned int>> &crops,
8794 const unsigned int *outputShape,
8795 const std::vector<T> &outputData,
8796 float scale = 1.0f,
8797 int32_t offset = 0)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008798 {
8799 auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);
8800
8801 armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
8802 armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);
8803
8804 inputTensorInfo.SetQuantizationScale(scale);
8805 inputTensorInfo.SetQuantizationOffset(offset);
8806
8807 outputTensorInfo.SetQuantizationScale(scale);
8808 outputTensorInfo.SetQuantizationOffset(offset);
8809
8810 auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);
8811
8812 LayerTestResult<T, OutputDim> result(outputTensorInfo);
8813 result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);
8814
8815 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
8816 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8817
8818 armnn::BatchToSpaceNdQueueDescriptor data;
8819 data.m_Parameters.m_DataLayout = dataLayout;
8820 data.m_Parameters.m_BlockShape = blockShape;
8821 data.m_Parameters.m_Crops = crops;
8822 armnn::WorkloadInfo info;
8823 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
8824 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8825
8826 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);
8827
8828 inputHandle->Allocate();
8829 outputHandle->Allocate();
8830
8831 CopyDataToITensorHandle(inputHandle.get(), input.origin());
8832
8833 workload->Execute();
8834
8835 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8836
8837 return result;
8838}
8839
8840} // anonymous namespace
8841
// BatchToSpaceNd, NHWC, Float32: block shape 2x2, no cropping.
// Four 2x2x1 batches are interleaved into a single 4x4x1 plane.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 2, 2, 1};
    const unsigned int outputShape[] = {1, 4, 4, 1};

    std::vector<float> input({
        // Batch 0, Height 0, Width (2) x Channel (1)
        1.0f, 3.0f,
        // Batch 0, Height 1, Width (2) x Channel (1)
        9.0f, 11.0f,


        // Batch 1, Height 0, Width (2) x Channel (1)
        2.0f, 4.0f,
        // Batch 1, Height 1, Width (2) x Channel (1)
        10.0f, 12.0f,


        // Batch 2, Height 0, Width (2) x Channel (1)
        5.0f, 7.0f,
        // Batch 2, Height 1, Width (2) x Channel (1)
        13.0f, 15.0f,

        // Batch 3, Height 0, Width (2) x Channel (1)
        6.0f, 8.0f,
        // Batch 3, Height 1, Width (2) x Channel (1)
        14.0f, 16.0f
    });

    // Single batch, 4x4 plane: the block interleaving restores values 1..16 in order.
    std::vector<float> expectedOutput({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape {2, 2};
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
8887
// BatchToSpaceNd, NHWC, Float32: block 2x2 on a [4, 1, 1, 1] input with no
// cropping -> [1, 2, 2, 1]; with 1x1 spatial input the data order is unchanged.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 2, 2, 1};

    std::vector<float> input({
        // One element per batch (4 batches of 1x1x1).
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
8909
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008910LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
8911 armnn::IWorkloadFactory& workloadFactory,
8912 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008913{
8914 const unsigned int inputShape[] = {4, 1, 1, 3};
8915 const unsigned int outputShape[] = {1, 2, 2, 3};
8916
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008917 std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008918
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00008919 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008920
8921 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00008922 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008923
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008924 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
8925 armnn::DataLayout::NHWC, inputShape, input, blockShape,
8926 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008927}
8928
// BatchToSpaceNd, NHWC, Float32: block 2x2 with width cropping.
// Input [8, 1, 3, 1] would expand to [2, 2, 6, 1]; crops {{0, 0}, {2, 0}}
// remove the first two width columns (the zero padding), giving [2, 2, 4, 1].
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test4(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {8, 1, 3, 1};
    const unsigned int outputShape[] = {2, 2, 4, 1};

    // Leading 0.0f in each batch row lands in the cropped width region.
    std::vector<float> input({
        0.0f, 1.0f, 3.0f,
        0.0f, 9.0f, 11.0f,
        0.0f, 2.0f, 4.0f,
        0.0f, 10.0f, 12.0f,
        0.0f, 5.0f, 7.0f,
        0.0f, 13.0f, 15.0f,
        0.0f, 6.0f, 8.0f,
        0.0f, 14.0f, 16.0f
    });

    std::vector<float> expectedOutput({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
8961
// BatchToSpaceNd, NCHW, Float32: block 2x2, no cropping.
// Input [4, 3, 1, 1] -> output [1, 3, 2, 2]; the four batches are interleaved
// into the 2x2 spatial plane of each of the three channels.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    // Three channel values per batch, batches in order.
    std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});

    std::vector<float> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1.0f, 4.0f,
        7.0f, 10.0f,

        // Batch 0, Channel 1, Height (2) x Width (2)
        2.0f, 5.0f,
        8.0f, 11.0f,

        // Batch 0, Channel 2, Height (2) x Width (2)
        3.0f, 6.0f,
        9.0f, 12.0f,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00008992
// BatchToSpaceNd, NCHW, Float32: block 2x2 on a [4, 1, 1, 1] input with no
// cropping -> [1, 1, 2, 2]; with 1x1 spatial input the data order is unchanged.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 1, 2, 2};

    std::vector<float> input({
        // One element per batch (4 batches of 1x1x1).
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9014
// BatchToSpaceNd, NCHW, Float32: block 2x2, no cropping, interleaved input.
// Input [4, 3, 1, 1] -> output [1, 3, 2, 2]; batches carry interleaved values
// so each channel's 2x2 plane receives consecutive pairs.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    std::vector<float> input({1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f});

    std::vector<float> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1.0f, 7.0f,
        2.0f, 8.0f,

        // Batch 0, Channel 1, Height (2) x Width (2)
        3.0f, 9.0f,
        4.0f, 10.0f,

        // Batch 0, Channel 2, Height (2) x Width (2)
        5.0f, 11.0f,
        6.0f, 12.0f,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009045
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009046LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
9047 armnn::IWorkloadFactory& workloadFactory,
9048 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009049{
9050 const unsigned int inputShape[] = {4, 2, 2, 1};
9051 const unsigned int outputShape[] = {1, 4, 4, 1};
9052
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009053 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
9054 std::vector<uint8_t> expectedOutput({1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009055
9056 std::vector<unsigned int> blockShape({2, 2});
9057 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9058
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00009059 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
9060 input, blockShape, crops, outputShape, expectedOutput);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009061}
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009062
// BatchToSpaceNd, NHWC, uint8: block 2x2 on a [4, 1, 1, 1] input with no
// cropping -> [1, 2, 2, 1]; data order is unchanged.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 2, 2, 1};

    std::vector<uint8_t> input({
        // One element per batch (4 batches of 1x1x1).
        1, 2, 3, 4
    });

    std::vector<uint8_t> expectedOutput({1, 2, 3, 4});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9084
9085LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest3(
9086 armnn::IWorkloadFactory& workloadFactory,
9087 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9088{
9089 const unsigned int inputShape[] = {4, 1, 1, 3};
9090 const unsigned int outputShape[] = {1, 2, 2, 3};
9091
9092 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9093
9094 std::vector<uint8_t> expectedOutput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9095
9096 std::vector<unsigned int> blockShape({2, 2});
9097 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9098
9099 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9100 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9101 crops, outputShape, expectedOutput);
9102}
9103
9104
// BatchToSpaceNd, NCHW, uint8: block 2x2, no cropping.
// Input [4, 3, 1, 1] -> output [1, 3, 2, 2]; the four batches are interleaved
// into the 2x2 spatial plane of each of the three channels.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest1(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    // Three channel values per batch, batches in order.
    std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});

    std::vector<uint8_t> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1, 4,
        7, 10,

        // Batch 0, Channel 1, Height (2) x Width (2)
        2, 5,
        8, 11,

        // Batch 0, Channel 2, Height (2) x Width (2)
        3, 6,
        9, 12,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9135
// BatchToSpaceNd, NCHW, uint8: block 2x2 on a [4, 1, 1, 1] input with no
// cropping -> [1, 1, 2, 2]; data order is unchanged.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 1, 2, 2};

    std::vector<uint8_t> input({
        // One element per batch (4 batches of 1x1x1).
        1, 2, 3, 4
    });

    std::vector<uint8_t> expectedOutput({1, 2, 3, 4});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9157
// BatchToSpaceNd, NCHW, uint8: block 2x2, no cropping, interleaved input.
// Input [4, 3, 1, 1] -> output [1, 3, 2, 2]; batches carry interleaved values
// so each channel's 2x2 plane receives consecutive pairs.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    std::vector<uint8_t> input({1, 3, 5, 7, 9, 11, 2, 4, 6, 8, 10, 12});

    std::vector<uint8_t> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1, 7,
        2, 8,

        // Batch 0, Channel 1, Height (2) x Width (2)
        3, 9,
        4, 10,

        // Batch 0, Channel 2, Height (2) x Width (2)
        5, 11,
        6, 12,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9188
// BatchToSpaceNd, NCHW, uint8: block 2x2 with width cropping.
// Input [8, 1, 1, 3] would expand to [2, 1, 2, 6]; crops {{0, 0}, {2, 0}}
// remove the first two width columns (the zero padding), giving [2, 1, 2, 4].
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest4(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {8, 1, 1, 3};
    const unsigned int outputShape[] = {2, 1, 2, 4};

    // Leading 0 in each batch row lands in the cropped width region.
    std::vector<uint8_t> input({
        0, 1, 3, 0, 9, 11,
        0, 2, 4, 0, 10, 12,
        0, 5, 7, 0, 13, 15,
        0, 6, 8, 0, 14, 16
    });

    std::vector<uint8_t> expectedOutput({
        1, 2, 3, 4,
        5, 6, 7, 8,
        9, 10, 11, 12,
        13, 14, 15, 16
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9217
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009218LayerTestResult<float, 4> StridedSlice4DFloat32Test(
9219 armnn::IWorkloadFactory& workloadFactory,
9220 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9221{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009222 return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009223}
9224
9225LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
9226 armnn::IWorkloadFactory& workloadFactory,
9227 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9228{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009229 return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009230}
9231
9232LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
9233 armnn::IWorkloadFactory& workloadFactory,
9234 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9235{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009236 return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009237}
9238
9239LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
9240 armnn::IWorkloadFactory& workloadFactory,
9241 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9242{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009243 return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009244}
9245
9246LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
9247 armnn::IWorkloadFactory& workloadFactory,
9248 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9249{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009250 return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009251}
9252
9253LayerTestResult<float, 3> StridedSlice3DFloat32Test(
9254 armnn::IWorkloadFactory& workloadFactory,
9255 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9256{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009257 return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009258}
9259
9260LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
9261 armnn::IWorkloadFactory& workloadFactory,
9262 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9263{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009264 return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009265}
9266
9267LayerTestResult<float, 2> StridedSlice2DFloat32Test(
9268 armnn::IWorkloadFactory& workloadFactory,
9269 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9270{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009271 return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009272}
9273
9274LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
9275 armnn::IWorkloadFactory& workloadFactory,
9276 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9277{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009278 return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009279}
9280
9281LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
9282 armnn::IWorkloadFactory& workloadFactory,
9283 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9284{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009285 return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009286}
9287
9288LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
9289 armnn::IWorkloadFactory& workloadFactory,
9290 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9291{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009292 return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009293}
9294
9295LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
9296 armnn::IWorkloadFactory& workloadFactory,
9297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9298{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009299 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009300}
9301
9302LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
9303 armnn::IWorkloadFactory& workloadFactory,
9304 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9305{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009306 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009307}
9308
9309LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
9310 armnn::IWorkloadFactory& workloadFactory,
9311 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9312{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009313 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009314}
9315
9316LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
9317 armnn::IWorkloadFactory& workloadFactory,
9318 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9319{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009320 return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009321}
9322
9323LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
9324 armnn::IWorkloadFactory& workloadFactory,
9325 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9326{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009327 return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009328}
9329
9330LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
9331 armnn::IWorkloadFactory& workloadFactory,
9332 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9333{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009334 return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009335}
9336
9337LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
9338 armnn::IWorkloadFactory& workloadFactory,
9339 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9340{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009341 return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009342}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009343
9344LayerTestResult<float, 4> Debug4DFloat32Test(
9345 armnn::IWorkloadFactory& workloadFactory,
9346 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9347{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009348 return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009349}
9350
9351LayerTestResult<float, 3> Debug3DFloat32Test(
9352 armnn::IWorkloadFactory& workloadFactory,
9353 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9354{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009355 return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009356}
9357
9358LayerTestResult<float, 2> Debug2DFloat32Test(
9359 armnn::IWorkloadFactory& workloadFactory,
9360 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9361{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009362 return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009363}
9364
9365LayerTestResult<float, 1> Debug1DFloat32Test(
9366 armnn::IWorkloadFactory& workloadFactory,
9367 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9368{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009369 return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009370}
9371
9372LayerTestResult<uint8_t, 4> Debug4DUint8Test(
9373 armnn::IWorkloadFactory& workloadFactory,
9374 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9375{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009376 return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009377}
9378
9379LayerTestResult<uint8_t, 3> Debug3DUint8Test(
9380 armnn::IWorkloadFactory& workloadFactory,
9381 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9382{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009383 return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009384}
9385
9386LayerTestResult<uint8_t, 2> Debug2DUint8Test(
9387 armnn::IWorkloadFactory& workloadFactory,
9388 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9389{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009390 return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009391}
9392
9393LayerTestResult<uint8_t, 1> Debug1DUint8Test(
9394 armnn::IWorkloadFactory& workloadFactory,
9395 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9396{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009397 return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009398}
Matteo Martincigh49124022019-01-11 13:25:59 +00009399
narpra014951d842019-01-18 16:53:53 +00009400LayerTestResult<float, 1> Gather1DParamsFloatTest(
9401 armnn::IWorkloadFactory& workloadFactory,
9402 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9403{
9404 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9405}
9406
9407LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
9408 armnn::IWorkloadFactory& workloadFactory,
9409 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9410{
9411 return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9412}
9413
9414LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
9415 armnn::IWorkloadFactory& workloadFactory,
9416 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9417{
9418 return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9419}
9420
9421LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
9422 armnn::IWorkloadFactory& workloadFactory,
9423 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9424{
9425 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9426}
9427
9428LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
9429 armnn::IWorkloadFactory& workloadFactory,
9430 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9431{
9432 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9433}
9434
9435LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
9436 armnn::IWorkloadFactory& workloadFactory,
9437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9438{
9439 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
9440 workloadFactory, memoryManager);
Matteo Martincigh3d6898c2019-01-15 16:11:44 +00009441}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009442
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009443LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009444 armnn::IWorkloadFactory& workloadFactory,
9445 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9446{
9447 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9448}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009449
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009450LayerTestResult<float, 4> DequantizeOffsetUint8Test(
9451 armnn::IWorkloadFactory& workloadFactory,
9452 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9453{
9454 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9455}
9456
9457LayerTestResult<float, 4> DequantizeSimpleInt16Test(
9458 armnn::IWorkloadFactory& workloadFactory,
9459 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9460{
9461 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9462}
9463
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009464LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
9465 armnn::IWorkloadFactory& workloadFactory,
9466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9467{
9468 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9469}
9470
9471LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
9472 armnn::IWorkloadFactory& workloadFactory,
9473 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9474{
9475 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9476}
9477
9478LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
9479 armnn::IWorkloadFactory& workloadFactory,
9480 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9481{
9482 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9483}