blob: 52c9773b9fd6a00b9297b65d7509055a49084500 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
33#include "ReshapeTestImpl.hpp"
34#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000035#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000036#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
41#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010042#include "LstmTestImpl.hpp"
43#include "ConvertFp16ToFp32TestImpl.hpp"
44#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000045#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000046#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010047#include "QuantizeTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Stored flat in CHW order (3 channels, each 8 rows of 16 values):
//   channel 0: rows of 0.5f, with the second row all zero,
//   channel 1: a vertical stripe of 1s in column 2, zeros elsewhere,
//   channel 2: all -1.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
76
// 2-channel bias used by a number of Conv2d tests (selected by GetBias2 when
// the bias is enabled): channel 0 gets no offset, channel 1 gets +2.
static std::vector<float> Bias2({0, 2});
79
telsoa01c577f2c2018-08-31 09:22:23 +010080// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000081template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
telsoa014fcda012018-03-09 14:13:49 +000082boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
83{
84 if(biasEnabled)
85 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000086 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +000087 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
88 return bias;
89 }
90 else
91 {
92 return boost::multi_array<T, 1>();
93 }
94}
95
// Convolves the shared 1x3x8x16 input with a 2-element batch of 3-channel 3x5
// kernels and checks the result against precomputed expected values.
// qScale/qOffset are applied through QuantizedVector so the same data serves
// both float and quantised instantiations; layout is forwarded to the impl.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels (shape {2, 3, 5, 3}):
    // six 5x3 channel planes listed batch-major below.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output tensor has shape {1, 2, 4, 14}: one batch, one 4x14
    // channel per kernel batch element.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // Delegate execution and comparison to the shared convolution test impl.
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
179
// Convolves the shared 1x3x8x16 input with a 2-element batch of 3-channel 3x3
// kernels and checks the result against precomputed expected values.
// The 3x3 kernel size exercises ArmCompute's direct convolution path.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels (shape {2, 3, 3, 3}):
    // six 3x3 channel planes listed batch-major below.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image (shape {1, 2, 6, 14}).
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // Delegate execution and comparison to the shared convolution test impl.
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
256
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000257template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000258LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
259 armnn::IWorkloadFactory& workloadFactory,
260 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
261 float qScale,
262 int32_t qOffset,
263 bool biasEnabled,
264 armnn::DataLayout dataLayout)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100265{
266 // Use common single-batch 5x5 image.
267
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000268 armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100269 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
270 {
271 1, 5, 2, 3,
272 8, 7, 3, 6,
273 3, 3, 9, 1
274 });
275
276
277 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000278 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100279 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
280 4, 5, 6,
281 0, 0, 0,
282 3, 2, 1
283 });
284
285 // Expected output is 1 batch of a 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000286 armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100287
288 const std::vector<float> outputData =
289 {
290 23, 41, 33, 21,
291 44, 65, 76, 52,
292 82, 85, 79, 42
293 };
294
295 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
296
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000297 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
298 workloadFactory,
299 memoryManager,
300 input,
301 kernel,
302 boost::multi_array<T, 1>(),
303 expectedOutput,
304 dataLayout,
305 qScale,
306 qOffset);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100307}
308
// Convolves a 1x5x5x1 (NHWC) input with a single 3x3 kernel using stride 2x2
// and symmetric 1-pixel padding, checking against precomputed values.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
        {
            1, 5, 2, 3, 5,
            8, 7, 3, 6, 3,
            3, 3, 9, 1, 9,
            4, 1, 8, 1, 3,
            6, 8, 1, 9, 2
        });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
        {
            4, 5, 6,
            0, 0, 0,
            3, 2, 1
        });

    // Expected output is a single-batch, 1 channel, 3x3 image
    // (5x5 input, stride 2, pad 1 on all sides).
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
    {
        23, 33, 24,
        91, 99, 48,
        26, 50, 19
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // Symmetric 1-pixel padding, 2x2 stride.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX = 2;
    uint32_t strideY = 2;

    // No bias is used (biasEnabled is currently not consulted here).
    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
374
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000375LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
376 armnn::IWorkloadFactory& workloadFactory,
377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
378 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000379 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000380{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000381 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
382 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000383}
384
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000385LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
386 armnn::IWorkloadFactory& workloadFactory,
387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
388 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000389 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000390{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000391 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
392 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000393}
394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000395LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
396 armnn::IWorkloadFactory& workloadFactory,
397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
398 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000399 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000401 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
402 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000403}
404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000405LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
406 armnn::IWorkloadFactory& workloadFactory,
407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
408 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000410 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
411 workloadFactory,
412 memoryManager,
413 0.f,
414 0,
415 biasEnabled,
416 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100417}
418
Mike Kelly7332ed82018-12-20 17:03:06 +0000419LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
420 armnn::IWorkloadFactory& workloadFactory,
421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
422 bool biasEnabled,
423 const armnn::DataLayout layout)
424{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000425 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
426 workloadFactory,
427 memoryManager,
428 0.f,
429 0,
430 biasEnabled,
431 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000432}
433
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000434LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
435 armnn::IWorkloadFactory& workloadFactory,
436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
437 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000438 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000440 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
441 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000442}
443
// Convolves a 1x1x3x3 input with a 2x2 kernel using asymmetric padding larger
// than half the kernel size (left 1, top 2, right 3, bottom 4) and checks the
// result against manually calculated values. Bias is always disabled.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0  -12*0 -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0  -21*0  -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0  ; -11*13 -21*23 -12*0  -22*0  ; -11*23 -21*33 -12*0  -22*0  ; -11*33 -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0 -21*0 -12*0  -22*0 ..]
//[.....  .....  ..... ..... ; .....  .....  .....  .....  ; .....  .....  .....  .....  ; ..... ..... ..... .....  ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    // Delegate to the shared impl; bias is forced off via GetBias2(false, ...).
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        2, // Padding top.
        3, // Padding right.
        4); // Padding bottom.
}
507
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000508template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
509 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000510LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
511 armnn::IWorkloadFactory& workloadFactory,
512 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000513 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000514 float qScale,
515 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000516{
telsoa01c577f2c2018-08-31 09:22:23 +0100517 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000518 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000519 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
520 QuantizedVector<T>(qScale, qOffset, {
521 11,21,31,41,51,
522 12,22,32,42,52,
523 13,23,33,43,53,
524 14,24,34,44,54,
525 15,25,35,45,55,
526 })));
527
telsoa01c577f2c2018-08-31 09:22:23 +0100528 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000529 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000530 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
531 QuantizedVector<T>(qScale, qOffset, {
532 -11,-21,-31,-41,
533 -12,-22,-32,-42,
534 -13,-23,-33,-43,
535 -14,-24,-34,-44,
536 })));
537
telsoa01c577f2c2018-08-31 09:22:23 +0100538 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000539 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000540 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
541 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
542 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000543 -7140, -10580, -13940, -9300, -5230,
544 -9590, -14120, -18520, -12290, -6860,
545 -9980, -14560, -18960, -12560, -7000,
546 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100547 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000548 })));
549
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000550 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
551 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000552 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000553 input,
554 kernel,
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000555 GetBias2<ArmnnBType>(false, qScale, qOffset),
telsoa014fcda012018-03-09 14:13:49 +0000556 expectedOutput,
557 qScale,
558 qOffset,
narpra015f703182018-10-26 16:24:58 +0100559 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100560 1, // Padding left.
561 1, // Padding top.
562 2, // Padding right.
563 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100564}
565
// Depthwise-convolves a 1x2x5x5 input with a depth-multiplier-1, 2-channel 4x4
// kernel using asymmetric padding (left 1, top 1, right 2, bottom 2) and
// stride 1x1, checking against values calculated with TensorFlow.
// NOTE(review): the QuantizedVector calls below read the quantisation
// scale/offset from the freshly constructed TensorInfos (which were built
// without explicit quantisation parameters) rather than from qScale/qOffset —
// confirm this is intentional; qScale/qOffset are still used for the bias and
// the impl call.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,
            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    // Delegate execution and comparison to the shared depthwise impl.
    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
642
// NHWC variant of DepthwiseConvolution2dAsymmetricTestCommon: the same 2-channel
// 5x5 input and 4x4 kernel data, but with the input/output tensors laid out
// NHWC (channel values interleaved per pixel below). Padding is (1,1,2,2),
// stride 1x1.
// NOTE(review): as in the NCHW variant, QuantizedVector reads quantisation
// params from the default-constructed TensorInfos, not from qScale/qOffset.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // Single-batch 5x5 input with 2 interleaved channels (NHWC):
    // each pair below is (channel0, channel1) for one pixel.
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0, 25,
             1, 26,
             2, 27,
             3, 28,
             4, 29,

             5, 30,
             6, 31,
             7, 32,
             8, 33,
             9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    // 2-channel 4x4 kernel (same data as the NCHW variant).
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output: same values as the NCHW variant, interleaved per pixel.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    // Delegate execution and comparison to the shared NHWC depthwise impl.
    return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
750
// Reference test for depthwise convolution 2d with dilation in the NHWC
// layout: a 9x9 single-channel input containing a 3x3 block of ones is
// convolved with a 3x3 kernel dilated by 3 in both dimensions, so every
// valid kernel position overlaps exactly the centre element (value 5).
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,     // quantization scale used when T is a quantized type
    int32_t qOffset,  // quantization offset used when T is a quantized type
    bool biasEnabled) // when true, a bias (from GetBias2) is applied
{
    // Input: N=1, H=9, W=9, C=1 with a central 3x3 block of ones.
    armnn::TensorInfo inputTensorInfo({ 1, 9, 9, 1}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // Single 3x3 kernel (values 1..9); only the centre tap (5) can land on a 1
    // once the 3x3 dilation spreads the taps 3 elements apart.
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
             1, 2, 3,
             4, 5, 6,
             7, 8, 9
        })));

    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX  = 1;
    uint32_t strideY  = 1;
    uint32_t dilationX  = 3;
    uint32_t dilationY  = 3;

    // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
             5, 5, 5,
             5, 5, 5,
             5, 5, 5
        })));

    return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);

}
819
telsoa014fcda012018-03-09 14:13:49 +0000820LayerTestResult<float, 4>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000821Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
822 armnn::IWorkloadFactory& workloadFactory,
823 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000824 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000825{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000826 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
827 <armnn::DataType::Float32, armnn::DataType::Float32>(
828 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000829}
830
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000831LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
832 armnn::IWorkloadFactory& workloadFactory,
833 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000834 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000835{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000836 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000837 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000838}
839
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000840LayerTestResult<float, 4> DepthwiseConvolution2dTest(
841 armnn::IWorkloadFactory& workloadFactory,
842 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
843 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000844 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000845{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000846 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000847 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000848}
849
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000850LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
851 armnn::IWorkloadFactory& workloadFactory,
852 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
853 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100854{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000855 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
856 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100857}
858
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000859LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
860 armnn::IWorkloadFactory& workloadFactory,
861 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
862 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000863 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000864{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000865 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000866 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000867}
868
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000869LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
870 armnn::IWorkloadFactory& workloadFactory,
871 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
872 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000873 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100874{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000875 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000876 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +0100877}
878
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000879LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
880 armnn::IWorkloadFactory& workloadFactory,
881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
882 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000883 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000884{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000885 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000886 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000887}
888
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000889LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
890 armnn::IWorkloadFactory& workloadFactory,
891 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
892 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000893 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000894{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000895 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000896 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000897}
898
Bruno Goncalves22972f02019-04-26 21:03:24 -0300899LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
900 armnn::IWorkloadFactory& workloadFactory,
901 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
902{
903 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
904 workloadFactory,
905 memoryManager,
906 0.f,
907 0,
908 false);
909}
910
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000911LayerTestResult<float, 4> Convolution1dTest(
912 armnn::IWorkloadFactory& workloadFactory,
913 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
914 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000915{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000916 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
917 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000918}
919
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000920LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
921 armnn::IWorkloadFactory& workloadFactory,
922 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
923 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000924{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000925 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
926 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000927}
928
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000929LayerTestResult<float,4> CompareConvolution2dTest(
930 armnn::IWorkloadFactory& workloadFactory,
931 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
932 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +0000933{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000934 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
935 workloadFactory, memoryManager, refWorkloadFactory);
telsoa014fcda012018-03-09 14:13:49 +0000936}
937
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000938LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000939 armnn::IWorkloadFactory& workloadFactory,
940 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
941 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +0000942 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000943{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000944 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
945 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +0000946}
947
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000948LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
949 armnn::IWorkloadFactory& workloadFactory,
950 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
951 armnn::IWorkloadFactory& refWorkloadFactory,
952 const armnn::DataLayout layout)
953{
954 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
955 workloadFactory, memoryManager, refWorkloadFactory, layout);
956}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000957
958LayerTestResult<float,4> SimpleNormalizationAcrossTest(
959 armnn::IWorkloadFactory& workloadFactory,
960 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000961{
962 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
963 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000964 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000965}
966
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000967LayerTestResult<float,4> SimpleNormalizationWithinTest(
968 armnn::IWorkloadFactory& workloadFactory,
969 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000970{
971 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
972 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000973 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000974}
975
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000976LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
977 armnn::IWorkloadFactory& workloadFactory,
978 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +0100979{
980 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
981 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000982 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +0100983}
984
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000985LayerTestResult<float,2> SimpleSoftmaxTest(
986 armnn::IWorkloadFactory& workloadFactory,
987 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
988 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000989{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000990 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000991}
992
Narumol Prangnawarat65d30962019-03-14 11:55:03 +0000993LayerTestResult<float,3> Simple3dSoftmaxTest(
994 armnn::IWorkloadFactory& workloadFactory,
995 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
996 float beta)
997{
998 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
999}
1000
1001LayerTestResult<float,4> Simple4dSoftmaxTest(
1002 armnn::IWorkloadFactory& workloadFactory,
1003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1004 float beta)
1005{
1006 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1007}
1008
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001009LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1010 armnn::IWorkloadFactory& workloadFactory,
1011 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1012 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001013{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001014 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001015}
1016
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001017LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1018 armnn::IWorkloadFactory& workloadFactory,
1019 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1020 float beta)
1021{
1022 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1023}
1024
1025LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1026 armnn::IWorkloadFactory& workloadFactory,
1027 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1028 float beta)
1029{
1030 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1031}
1032
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001033LayerTestResult<float,4> CompareNormalizationTest(
1034 armnn::IWorkloadFactory& workloadFactory,
1035 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1036 armnn::IWorkloadFactory& refWorkloadFactory,
1037 armnn::NormalizationAlgorithmChannel normChannel,
1038 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00001039{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001040 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001041}
1042
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001043LayerTestResult<float,2> CompareSoftmaxTest(
1044 armnn::IWorkloadFactory& workloadFactory,
1045 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001046 armnn::IWorkloadFactory& refWorkloadFactory,
1047 float beta)
1048{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001049 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
1050 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001051}
1052
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001053LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
1054 armnn::IWorkloadFactory& workloadFactory,
1055 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001056 armnn::IWorkloadFactory& refWorkloadFactory,
1057 float beta)
1058{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001059 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
1060 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001061}
1062
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001063std::vector<LayerTestResult<float,3>> SplitterTest(
1064 armnn::IWorkloadFactory& workloadFactory,
1065 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001066{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001067 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00001068}
1069
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001070std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
1071 armnn::IWorkloadFactory& workloadFactory,
1072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001073{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001074 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001075}
1076
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001077LayerTestResult<float, 3> CopyViaSplitterTest(
1078 armnn::IWorkloadFactory& workloadFactory,
1079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001080{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001081 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001082}
1083
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001084LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1085 armnn::IWorkloadFactory& workloadFactory,
1086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001087{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001088 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001089}
1090
// Float32 LSTM with CIFG and peephole connections enabled, no projection.
// Input is 2 batches x 2 features; expected output is 2 batches x 4 units
// (precomputed reference values).
LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            { 2., 3., 3., 4. }));

    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1106
// Float32 LSTM without CIFG, with peephole and projection enabled.
// Input is 2 batches x 5 features; expected output is 2 batches x 16
// projected outputs (precomputed reference values).
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
             0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1128
// Float32 basic LSTM: no CIFG, no peephole, no projection.
// Input is 2 batches x 2 features; expected output is 2 batches x 4 units
// (precomputed reference values).
LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {2., 3., 3., 4.}));


    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
              -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1146
// QSymm16 basic LSTM (no CIFG/peephole/projection) with QAsymm8 constants.
// Quantizes the same input/expected values as the Float32 variant with
// scale 1.0 and offset 0.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    // Weights/biases are kept in QAsymm8 while activations use QSymm16.
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({2, 2}, datatype);
    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
            std::vector<float>{2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
                                          -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);

}
1170
// QSymm16 LSTM with CIFG and peephole enabled, no projection; QAsymm8
// constants. Quantizes the Float32 variant's reference data with scale 1.0
// and offset 0.
LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    // Weights/biases are kept in QAsymm8 while activations use QSymm16.
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
            std::vector<float>({ 2., 3., 3., 4. })));

    armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));

    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
1194
// QSymm16 LSTM without CIFG, with peephole and projection; QAsymm8
// constants. Uses scale 2.0 / offset 0 to quantize the Float32 variant's
// reference data.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 2.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    // Weights/biases are kept in QAsymm8 while activations use QSymm16.
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));

    armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));

    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
1224
// QSymm16 basic LSTM where the constant tensors (weights/biases) are also
// QSymm16, unlike the other Int16 tests which use QAsymm8 constants.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16

    armnn::TensorInfo inputDesc({2, 2}, datatype);
    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>{2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
                                          -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
}
1246
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001247LayerTestResult<float,3> MergerTest(
1248 armnn::IWorkloadFactory& workloadFactory,
1249 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001250{
surmeh013537c2c2018-05-18 16:31:43 +01001251 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00001252 unsigned int outputHeight = 6;
1253 unsigned int outputChannels = 3;
1254
surmeh013537c2c2018-05-18 16:31:43 +01001255 unsigned int inputWidth1 = 3;
1256 unsigned int inputHeight1 = 6;
1257 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00001258
surmeh013537c2c2018-05-18 16:31:43 +01001259 unsigned int inputWidth2 = 3;
1260 unsigned int inputHeight2 = 6;
1261 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00001262
telsoa01c577f2c2018-08-31 09:22:23 +01001263 // Define the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00001264 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
1265 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
1266 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00001267
1268 LayerTestResult<float,3> ret(outputTensorInfo);
1269
telsoa014fcda012018-03-09 14:13:49 +00001270 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
surmeh013537c2c2018-05-18 16:31:43 +01001271 {
1272 1.0f, 2.0f, 3.0f,
1273 4.0f, 5.0f, 6.0f,
1274 7.0f, 8.0f, 9.0f,
1275 10.0f, 11.0f, 12.0f,
1276 13.0f, 14.0f, 15.0f,
1277 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00001278
surmeh013537c2c2018-05-18 16:31:43 +01001279 19.0f, 20.0f, 21.0f,
1280 22.0f, 23.0f, 24.0f,
1281 25.0f, 26.0f, 27.0f,
1282 28.0f, 29.0f, 30.0f,
1283 31.0f, 32.0f, 33.0f,
1284 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00001285
surmeh013537c2c2018-05-18 16:31:43 +01001286 37.0f, 38.0f, 39.0f,
1287 40.0f, 41.0f, 42.0f,
1288 43.0f, 44.0f, 45.0f,
1289 46.0f, 47.0f, 48.0f,
1290 49.0f, 50.0f, 51.0f,
1291 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001292 })
1293 );
1294
telsoa014fcda012018-03-09 14:13:49 +00001295 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
1296 {
surmeh013537c2c2018-05-18 16:31:43 +01001297 1.0f, 2.0f, 3.0f,
1298 4.0f, 5.0f, 6.0f,
1299 7.0f, 8.0f, 9.0f,
1300 10.0f, 11.0f, 12.0f,
1301 13.0f, 14.0f, 15.0f,
1302 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00001303
surmeh013537c2c2018-05-18 16:31:43 +01001304 19.0f, 20.0f, 21.0f,
1305 22.0f, 23.0f, 24.0f,
1306 25.0f, 26.0f, 27.0f,
1307 28.0f, 29.0f, 30.0f,
1308 31.0f, 32.0f, 33.0f,
1309 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00001310 })
1311 );
1312
1313 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
1314 {
surmeh013537c2c2018-05-18 16:31:43 +01001315 37.0f, 38.0f, 39.0f,
1316 40.0f, 41.0f, 42.0f,
telsoa014fcda012018-03-09 14:13:49 +00001317 43.0f, 44.0f, 45.0f,
surmeh013537c2c2018-05-18 16:31:43 +01001318 46.0f, 47.0f, 48.0f,
1319 49.0f, 50.0f, 51.0f,
1320 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001321 })
1322 );
1323
telsoa01c577f2c2018-08-31 09:22:23 +01001324 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
telsoa014fcda012018-03-09 14:13:49 +00001325 armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
1326
telsoa01c577f2c2018-08-31 09:22:23 +01001327 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
telsoa014fcda012018-03-09 14:13:49 +00001328 armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
1329
telsoa014fcda012018-03-09 14:13:49 +00001330 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1331
1332 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
1333
1334 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
1335 subTensorsSupported ?
1336 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
1337 workloadFactory.CreateTensorHandle(inputTensorInfo1);
1338
1339 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
1340 subTensorsSupported ?
1341 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
1342 workloadFactory.CreateTensorHandle(inputTensorInfo2);
1343
telsoa014fcda012018-03-09 14:13:49 +00001344 armnn::MergerQueueDescriptor data;
1345 armnn::WorkloadInfo info;
1346 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1347 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00001348 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1349
1350 data.m_ViewOrigins.push_back(window1);
1351 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00001352
1353 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);
1354
1355 inputHandle1->Allocate();
1356 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00001357 outputHandle->Allocate();
1358
1359 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
1360 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00001361
Derek Lambertif30f7d32019-04-09 10:25:02 +01001362 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001363 workload->Execute();
1364
1365 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
1366
1367 return ret;
1368}
1369
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001370LayerTestResult<float,4> AdditionTest(
1371 armnn::IWorkloadFactory& workloadFactory,
1372 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001373{
1374 unsigned int batchSize = 2;
1375 unsigned int channels = 2;
1376 unsigned int height = 2;
1377 unsigned int width = 3;
1378
1379 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1380 armnn::TensorInfo outputTensorInfo;
1381
1382 unsigned int shape[] = {batchSize, channels, height, width};
1383
1384 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1385 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1386 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1387
1388
1389 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
1390 {
1391 0.0f, 2.0f, 1.0f,
1392 0.2f, 1.0f, 2.0f,
1393
1394 1.0f, 2.0f, 1.0f,
1395 0.2f, 1.0f, 2.0f,
1396
1397 0.0f, 2.0f, 1.0f,
1398 4.2f, 1.0f, 2.0f,
1399
1400 0.0f, 0.0f, 1.0f,
1401 0.2f, 1.0f, 2.0f,
1402 }));
1403
1404 auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
1405 {
1406 1.0f, 2.0f, 1.0f,
1407 0.0f, 1.0f, 2.0f,
1408
1409 1.0f, 2.0f, -2.0f,
1410 0.2f, 1.0f, 2.0f,
1411
1412 0.0f, 2.0f, 1.0f,
1413 4.2f, 0.0f, -3.0f,
1414
1415 0.0f, 0.0f, 1.0f,
1416 0.7f, 1.0f, 5.0f,
1417 }));
1418
1419 LayerTestResult<float,4> ret(outputTensorInfo);
1420 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
1421 {
1422 1.0f, 4.0f, 2.0f,
1423 0.2f, 2.0f, 4.0f,
1424
1425 2.0f, 4.0f, -1.0f,
1426 0.4f, 2.0f, 4.0f,
1427
1428 0.0f, 4.0f, 2.0f,
1429 8.4f, 1.0f, -1.0f,
1430
1431 0.0f, 0.0f, 2.0f,
1432 0.9f, 2.0f, 7.0f,
1433 }));
1434
1435 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1436 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1437 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1438
1439 armnn::AdditionQueueDescriptor data;
1440 armnn::WorkloadInfo info;
1441 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1442 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1443 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1444
1445 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1446
1447 inputHandle1->Allocate();
1448 inputHandle2->Allocate();
1449 outputHandle->Allocate();
1450
1451 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1452 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1453
Derek Lambertif30f7d32019-04-09 10:25:02 +01001454 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001455 workload->Execute();
1456
1457 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1458
1459 return ret;
1460}
1461
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001462template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001463LayerTestResult<T, 4> AdditionBroadcastTestImpl(
1464 armnn::IWorkloadFactory& workloadFactory,
1465 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001466 float qScale,
1467 int32_t qOffset)
1468{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001469 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
1470 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
1471 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001472
1473 if (armnn::IsQuantizedType<T>())
1474 {
1475 inputTensorInfo1.SetQuantizationScale(qScale);
1476 inputTensorInfo1.SetQuantizationOffset(qOffset);
1477 inputTensorInfo2.SetQuantizationScale(qScale);
1478 inputTensorInfo2.SetQuantizationOffset(qOffset);
1479 outputTensorInfo.SetQuantizationScale(qScale);
1480 outputTensorInfo.SetQuantizationOffset(qOffset);
1481 }
1482
1483 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
1484 {
1485 0.0f,
1486 1.0f,
1487
1488 2.0f,
1489 3.0f,
1490
1491 4.0f,
1492 5.0f,
1493 }));
1494
1495 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
1496 {
1497 0.5f, 1.5f, 2.5f,
1498 3.5f, 4.5f, 5.5f,
1499 }));
1500
1501 LayerTestResult<T,4> ret(outputTensorInfo);
1502 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
1503 {
1504 0.5f, 1.5f, 2.5f,
1505 4.5f, 5.5f, 6.5f,
1506
1507 2.5f, 3.5f, 4.5f,
1508 6.5f, 7.5f, 8.5f,
1509
1510 4.5f, 5.5f, 6.5f,
1511 8.5f, 9.5f, 10.5f,
1512 }));
1513
1514 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1515 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1516 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1517
1518 armnn::AdditionQueueDescriptor data;
1519 armnn::WorkloadInfo info;
1520 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1521 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1522 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1523
1524 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1525
1526 inputHandle1->Allocate();
1527 inputHandle2->Allocate();
1528 outputHandle->Allocate();
1529
1530 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1531 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1532
Derek Lambertif30f7d32019-04-09 10:25:02 +01001533 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001534 workload->Execute();
1535
1536 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1537
1538 return ret;
1539}
1540
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001541template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001542LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
1543 armnn::IWorkloadFactory& workloadFactory,
1544 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001545 float qScale,
1546 int32_t qOffset)
1547{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001548 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
1549 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
1550 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001551
1552 if (armnn::IsQuantizedType<T>())
1553 {
1554 inputTensorInfo1.SetQuantizationScale(qScale);
1555 inputTensorInfo1.SetQuantizationOffset(qOffset);
1556 inputTensorInfo2.SetQuantizationScale(qScale);
1557 inputTensorInfo2.SetQuantizationOffset(qOffset);
1558 outputTensorInfo.SetQuantizationScale(qScale);
1559 outputTensorInfo.SetQuantizationOffset(qOffset);
1560 }
1561
1562 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
1563 {
1564 0.0f, 1.0f, 2.0f,
1565 3.0f, 4.0f, 5.0f,
1566 6.0f, 7.0f, 8.0f,
1567 9.0f, 10.0f, 11.0f,
1568 12.0f, 13.0f, 14.0f,
1569 15.0f, 16.0f, 17.0f,
1570 }));
1571
1572 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
1573 {
1574 0.5f,
1575 }));
1576
1577 LayerTestResult<T,4> ret(outputTensorInfo);
1578 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
1579 {
1580 0.5f, 1.5f, 2.5f,
1581 3.5f, 4.5f, 5.5f,
1582 6.5f, 7.5f, 8.5f,
1583 9.5f, 10.5f, 11.5f,
1584 12.5f, 13.5f, 14.5f,
1585 15.5f, 16.5f, 17.5f,
1586 }));
1587
1588 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1589 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1590 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1591
1592 armnn::AdditionQueueDescriptor data;
1593 armnn::WorkloadInfo info;
1594 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1595 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1596 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1597
1598 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1599
1600 inputHandle1->Allocate();
1601 inputHandle2->Allocate();
1602 outputHandle->Allocate();
1603
1604 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1605 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1606
Derek Lambertif30f7d32019-04-09 10:25:02 +01001607 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001608 workload->Execute();
1609
1610 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1611
1612 return ret;
1613}
1614
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001615LayerTestResult<float, 4> AdditionBroadcastTest(
1616 armnn::IWorkloadFactory& workloadFactory,
1617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001618{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001619 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
1620 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001621}
1622
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001623LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1624 armnn::IWorkloadFactory& workloadFactory,
1625 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001626{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001627 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
1628 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001629}
1630
Sadik Armagan2999a022019-04-09 14:20:12 +01001631LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
1632 armnn::IWorkloadFactory& workloadFactory,
1633 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1634{
1635 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
1636 workloadFactory, memoryManager, 2.f, 0);
1637}
1638
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001639LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1640 armnn::IWorkloadFactory& workloadFactory,
1641 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001642{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001643 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
1644 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001645}
1646
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001647LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1648 armnn::IWorkloadFactory& workloadFactory,
1649 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001650{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001651 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
1652 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001653}
1654
Sadik Armagan2999a022019-04-09 14:20:12 +01001655LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
1656 armnn::IWorkloadFactory& workloadFactory,
1657 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1658{
1659 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
1660 workloadFactory, memoryManager, 0.1333333f, 0);
1661}
1662
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001663LayerTestResult<float,4> CompareAdditionTest(
1664 armnn::IWorkloadFactory& workloadFactory,
1665 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1666 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00001667{
1668 unsigned int batchSize = 4;
1669 unsigned int channels = 1;
1670 unsigned int height = 2;
1671 unsigned int width = 3;
1672
1673 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1674 armnn::TensorInfo outputTensorInfo;
1675
1676 unsigned int shape[] = {batchSize, channels, height, width};
1677
1678 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1679 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1680 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1681
1682 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
1683 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
1684
1685 LayerTestResult<float,4> ret(outputTensorInfo);
1686
1687 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1688 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1689 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1690
1691 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
1692 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
1693 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1694
1695 armnn::AdditionQueueDescriptor data;
1696 armnn::WorkloadInfo info;
1697 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1698 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1699 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1700
1701 armnn::AdditionQueueDescriptor refData = data;
1702 armnn::WorkloadInfo refInfo = info;
1703 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
1704 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
1705 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1706
1707 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1708 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
1709
1710 inputHandle1->Allocate();
1711 inputHandle2->Allocate();
1712 outputHandle->Allocate();
1713 inputHandle1Ref->Allocate();
1714 inputHandle2Ref->Allocate();
1715 outputHandleRef->Allocate();
1716
1717 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1718 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1719 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
1720 CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
1721
Derek Lambertif30f7d32019-04-09 10:25:02 +01001722 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001723 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01001724 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001725 workloadRef->Execute();
1726
1727 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1728 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1729
1730 return ret;
1731}
1732
surmeh01bceff2f2018-03-29 16:29:27 +01001733namespace {
Sadik Armagan2999a022019-04-09 14:20:12 +01001734template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001735LayerTestResult<T, 4> DivisionTestHelper(
1736 armnn::IWorkloadFactory& workloadFactory,
1737 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1738 const unsigned int shape0[4],
1739 const std::vector<T>& values0,
1740 float scale0,
1741 int32_t offset0,
1742 const unsigned int shape1[4],
1743 const std::vector<T> & values1,
1744 float scale1,
1745 int32_t offset1,
1746 const unsigned int outShape[4],
1747 const std::vector<T> & outValues,
1748 float outScale,
1749 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01001750{
Sadik Armagan2999a022019-04-09 14:20:12 +01001751 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
1752 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
1753 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001754
David Beck5cd01f32018-09-12 16:00:08 +01001755 inputTensorInfo0.SetQuantizationScale(scale0);
1756 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001757
David Beck5cd01f32018-09-12 16:00:08 +01001758 inputTensorInfo1.SetQuantizationScale(scale1);
1759 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001760
David Beck5cd01f32018-09-12 16:00:08 +01001761 outputTensorInfo.SetQuantizationScale(outScale);
1762 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001763
David Beck5cd01f32018-09-12 16:00:08 +01001764 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
1765 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001766
David Beck5cd01f32018-09-12 16:00:08 +01001767 LayerTestResult<T, 4> result(outputTensorInfo);
1768 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001769
David Beck5cd01f32018-09-12 16:00:08 +01001770 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1771 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1772 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001773
David Beck5cd01f32018-09-12 16:00:08 +01001774 armnn::DivisionQueueDescriptor data;
1775 armnn::WorkloadInfo info;
1776 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1777 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1778 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001779
David Beck5cd01f32018-09-12 16:00:08 +01001780 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001781
David Beck5cd01f32018-09-12 16:00:08 +01001782 inputHandle0->Allocate();
1783 inputHandle1->Allocate();
1784 outputHandle->Allocate();
1785
1786 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1787 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1788
Derek Lambertif30f7d32019-04-09 10:25:02 +01001789 workload->PostAllocationConfigure();
David Beck5cd01f32018-09-12 16:00:08 +01001790 workload->Execute();
1791
1792 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
1793
1794 return result;
1795}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001796} // anonymous namespace
1797
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001798LayerTestResult<float,4> DivisionByZeroTest(
1799 armnn::IWorkloadFactory& workloadFactory,
1800 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001801{
1802 const unsigned int width = 2;
1803 const unsigned int height = 2;
1804 const unsigned int channelCount = 2;
1805 const unsigned int batchSize = 2;
1806
1807 unsigned int shape[] = { batchSize, channelCount, height, width };
1808
1809 std::vector<float> input0({
1810 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1811 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1812
1813 std::vector<float> input1({
1814 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1815 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1816
1817 std::vector<float> output({
1818 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1819 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1820
Sadik Armagan2999a022019-04-09 14:20:12 +01001821 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1822 memoryManager,
1823 shape, input0, 1.0f, 0,
1824 shape, input1, 1.0f, 0,
1825 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001826}
1827
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001828LayerTestResult<float,4> DivisionTest(
1829 armnn::IWorkloadFactory& workloadFactory,
1830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001831{
1832 const unsigned int width = 2;
1833 const unsigned int height = 2;
1834 const unsigned int channelCount = 2;
1835 const unsigned int batchSize = 2;
1836
1837 unsigned int shape[] = { batchSize, channelCount, height, width };
1838
1839 std::vector<float> input0({
1840 2, 2, 2, 2, 3, 3, 3, 3,
1841 4, 4, 4, 4, 5, 5, 5, 5 });
1842
1843 std::vector<float> input1({
1844 1, 1, 1, 1, 2, 2, 2, 2,
1845 4, 4, 4, 4, 4, 4, 4, 4 });
1846
1847 std::vector<float> output({
1848 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1849 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1850
David Beck5cd01f32018-09-12 16:00:08 +01001851
Sadik Armagan2999a022019-04-09 14:20:12 +01001852 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1853 memoryManager,
1854 shape, input0, 1.0f, 0,
1855 shape, input1, 1.0f, 0,
1856 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001857}
1858
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001859LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
1860 armnn::IWorkloadFactory& workloadFactory,
1861 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001862{
1863 unsigned int shape0[] = { 1, 2, 2, 2 };
1864 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1865
1866 unsigned int shape1[] = { 1, 1, 1, 1 };
1867 std::vector<float> input1({ 2 });
1868
1869 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1870
David Beck5cd01f32018-09-12 16:00:08 +01001871
Sadik Armagan2999a022019-04-09 14:20:12 +01001872 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1873 memoryManager,
1874 shape0, input0, 1.0f, 0,
1875 shape1, input1, 1.0f, 0,
1876 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001877}
1878
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001879LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
1880 armnn::IWorkloadFactory& workloadFactory,
1881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001882{
1883 unsigned int shape0[] = { 1, 3, 3, 2 };
1884 std::vector<float> input0({
1885 1, 4, 3, 8, 5, 12,
1886 7, 16, 9, 20, 11, 24,
1887 13, 28, 15, 32, 17, 36});
1888
1889 unsigned int shape1[] = { 1, 1, 1, 2 };
1890 std::vector<float> input1({ 1, 2 });
1891
1892 std::vector<float> output({
1893 1, 2, 3, 4, 5, 6,
1894 7, 8, 9, 10, 11, 12,
1895 13, 14, 15, 16, 17, 18});
1896
Sadik Armagan2999a022019-04-09 14:20:12 +01001897 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1898 memoryManager,
1899 shape0, input0, 1.0f, 0,
1900 shape1, input1, 1.0f, 0,
1901 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001902}
1903
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001904LayerTestResult<uint8_t,4> DivisionUint8Test(
1905 armnn::IWorkloadFactory& workloadFactory,
1906 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001907{
1908 const unsigned int width = 2;
1909 const unsigned int height = 2;
1910 const unsigned int channelCount = 2;
1911 const unsigned int batchSize = 2;
1912
1913 unsigned int shape[] = { batchSize, channelCount, height, width };
1914
1915 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1916 4, 4, 4, 4, 5, 5, 5, 5 });
1917
1918 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1919 4, 4, 4, 4, 4, 4, 4, 4 });
1920
1921 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1922 4, 4, 4, 4, 5, 5, 5, 5});
1923
1924
Sadik Armagan2999a022019-04-09 14:20:12 +01001925 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
1926 memoryManager,
1927 shape, input0, 1.0f, 0,
1928 shape, input1, 1.0f, 0,
1929 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001930}
1931
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001932LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
1933 armnn::IWorkloadFactory& workloadFactory,
1934 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001935{
1936 unsigned int shape0[] = { 1, 2, 2, 2 };
1937 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1938
1939 unsigned int shape1[] = { 1, 1, 1, 1 };
1940 std::vector<uint8_t> input1({ 2 });
1941
1942 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1943
Sadik Armagan2999a022019-04-09 14:20:12 +01001944 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
1945 memoryManager,
1946 shape0, input0, 1.0f, 0,
1947 shape1, input1, 1.0f, 0,
1948 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001949}
1950
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001951LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
1952 armnn::IWorkloadFactory& workloadFactory,
1953 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001954{
1955 unsigned int shape0[] = { 1, 3, 3, 2 };
1956 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1957 7, 16, 9, 20, 11, 24,
1958 13, 28, 15, 32, 17, 36});
1959
1960 unsigned int shape1[] = { 1, 1, 1, 2 };
1961 std::vector<uint8_t> input1({ 1, 2 });
1962
1963 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1964 7, 8, 9, 10, 11, 12,
1965 13, 14, 15, 16, 17, 18});
1966
Sadik Armagan2999a022019-04-09 14:20:12 +01001967 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
1968 memoryManager,
1969 shape0, input0, 1.0f, 0,
1970 shape1, input1, 1.0f, 0,
1971 shape0, output, 1.0f, 0);
1972}
1973
1974LayerTestResult<int16_t,4> DivisionInt16Test(
1975 armnn::IWorkloadFactory& workloadFactory,
1976 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1977{
1978 unsigned int shape[] = { 2, 2, 2, 2 };
1979
1980 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1981 4, 4, 4, 4, 5, 5, 5, 5 });
1982
1983 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1984 4, 4, 4, 4, 4, 4, 4, 4 });
1985
1986 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1987 4, 4, 4, 4, 5, 5, 5, 5});
1988
1989
1990 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
1991 memoryManager,
1992 shape, input0, 1.0f, 0,
1993 shape, input1, 1.0f, 0,
1994 shape, output, 0.25f, 0);
1995}
1996
1997LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
1998 armnn::IWorkloadFactory& workloadFactory,
1999 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2000{
2001 unsigned int shape0[] = { 1, 2, 2, 2 };
2002 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2003
2004 unsigned int shape1[] = { 1, 1, 1, 1 };
2005 std::vector<int16_t> input1({ 2 });
2006
2007 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2008
2009 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2010 memoryManager,
2011 shape0, input0, 1.0f, 0,
2012 shape1, input1, 1.0f, 0,
2013 shape0, output, 1.0f, 0);
2014}
2015
2016LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
2017 armnn::IWorkloadFactory& workloadFactory,
2018 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2019{
2020 unsigned int shape0[] = { 1, 3, 3, 2 };
2021 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
2022 7, 16, 9, 20, 11, 24,
2023 13, 28, 15, 32, 17, 36});
2024
2025 unsigned int shape1[] = { 1, 1, 1, 2 };
2026 std::vector<int16_t> input1({ 1, 2 });
2027
2028 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
2029 7, 8, 9, 10, 11, 12,
2030 13, 14, 15, 16, 17, 18});
2031
2032 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2033 memoryManager,
2034 shape0, input0, 1.0f, 0,
2035 shape1, input1, 1.0f, 0,
2036 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002037}
2038
// Customisation point for the comparison/elementwise test helpers below:
// each supported descriptor type must provide an explicit specialisation
// (see the Maximum/Minimum/Equal/Greater specialisations that follow).
//
// NOTE(review): this primary template calls itself, so if it is ever
// instantiated for a descriptor type WITHOUT a specialisation it will
// recurse infinitely at runtime rather than fail to compile. Consider
// replacing the body with a dependent static_assert to turn that misuse
// into a compile-time error — confirm no other translation unit relies on
// instantiating the primary first.
template<typename DescriptorType>
std::unique_ptr<armnn::IWorkload> CreateWorkload(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const DescriptorType& descriptor)
{
    return CreateWorkload(workloadFactory, info, descriptor);
};
2047
2048template<>
2049std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
2050 const armnn::IWorkloadFactory& workloadFactory,
2051 const armnn::WorkloadInfo& info,
2052 const armnn::MaximumQueueDescriptor& descriptor)
2053{
2054 return workloadFactory.CreateMaximum(descriptor, info);
2055}
2056
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002057template<>
2058std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
2059 const armnn::IWorkloadFactory& workloadFactory,
2060 const armnn::WorkloadInfo& info,
2061 const armnn::MinimumQueueDescriptor& descriptor)
2062{
2063 return workloadFactory.CreateMinimum(descriptor, info);
2064}
2065
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002066template<>
2067std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
2068 const armnn::IWorkloadFactory& workloadFactory,
2069 const armnn::WorkloadInfo& info,
2070 const armnn::EqualQueueDescriptor& descriptor)
2071{
2072 return workloadFactory.CreateEqual(descriptor, info);
2073}
2074
FrancisMurtagh878f0232018-12-19 10:56:15 +00002075template<>
2076std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
2077 const armnn::IWorkloadFactory& workloadFactory,
2078 const armnn::WorkloadInfo& info,
2079 const armnn::GreaterQueueDescriptor& descriptor)
2080{
2081 return workloadFactory.CreateGreater(descriptor, info);
2082}
2083
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002084namespace {
kevmay012b4d88e2019-01-24 14:05:09 +00002085
2086template <typename Descriptor,
2087 armnn::DataType ArmnnTypeInput,
2088 armnn::DataType ArmnnTypeOutput,
2089 typename TInput = armnn::ResolveType<ArmnnTypeInput>,
2090 typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
2091LayerTestResult<TOutput, 4> ElementwiseTestHelper(
2092 armnn::IWorkloadFactory & workloadFactory,
2093 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
2094 const unsigned int shape0[4], std::vector<TInput> values0,
2095 const unsigned int shape1[4], std::vector<TInput> values1,
2096 const unsigned int outShape[4], std::vector<TOutput> outValues,
2097 float qScale = 0.0f, int qOffset = 0)
2098{
2099 const size_t dimensionCount = 4;
2100 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
2101 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
2102 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};
2103
2104 auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
2105 auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);
2106
2107 if (armnn::IsQuantizedType<TInput>())
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002108 {
kevmay012b4d88e2019-01-24 14:05:09 +00002109 inputTensorInfo0.SetQuantizationScale(qScale);
2110 inputTensorInfo0.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002111
kevmay012b4d88e2019-01-24 14:05:09 +00002112 inputTensorInfo1.SetQuantizationScale(qScale);
2113 inputTensorInfo1.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002114
kevmay012b4d88e2019-01-24 14:05:09 +00002115 outputTensorInfo.SetQuantizationScale(qScale);
2116 outputTensorInfo.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002117 }
kevmay012b4d88e2019-01-24 14:05:09 +00002118
2119 LayerTestResult<TOutput,4> ret(outputTensorInfo);
2120
2121 if(ArmnnTypeOutput == armnn::DataType::Boolean)
2122 {
2123 ret.compareBoolean = true;
2124 }
2125
2126 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2127 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2128 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2129
2130 Descriptor data;
2131 armnn::WorkloadInfo info;
2132 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2133 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2134 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2135 auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
2136
2137 inputHandle0->Allocate();
2138 inputHandle1->Allocate();
2139 outputHandle->Allocate();
2140
2141 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2142 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2143
Derek Lambertif30f7d32019-04-09 10:25:02 +01002144 workload->PostAllocationConfigure();
kevmay012b4d88e2019-01-24 14:05:09 +00002145 ExecuteWorkload(*workload, memoryManager);
2146
2147 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2148
2149 ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
2150 return ret;
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002151}
2152
kevmay012b4d88e2019-01-24 14:05:09 +00002153template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
2154LayerTestResult<T, 4> ElementwiseTestHelper(
2155 armnn::IWorkloadFactory & workloadFactory,
2156 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
2157 const unsigned int shape0[4], std::vector<T> values0,
2158 const unsigned int shape1[4], std::vector<T> values1,
2159 const unsigned int outShape[4], std::vector<T> outValues,
2160 float qScale = 0.0f, int qOffset = 0)
2161{
2162 return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
2163 (workloadFactory,
2164 memoryManager,
2165 shape0,
2166 values0,
2167 shape1,
2168 values1,
2169 outShape,
2170 outValues,
2171 qScale,
2172 qOffset);
2173}
2174}
2175
2176LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2177 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002178{
2179 const unsigned int width = 2;
2180 const unsigned int height = 2;
2181 const unsigned int channelCount = 2;
2182 const unsigned int batchSize = 2;
2183
2184 unsigned int shape[] = { batchSize, channelCount, height, width };
2185
2186 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2187 3, 3, 3, 3, 4, 4, 4, 4 });
2188
2189 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2190 5, 5, 5, 5, 4, 4, 4, 4 });
2191
kevmay012b4d88e2019-01-24 14:05:09 +00002192 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
2193 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002194
kevmay012b4d88e2019-01-24 14:05:09 +00002195 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002196 workloadFactory,
2197 memoryManager,
2198 shape,
2199 input0,
2200 shape,
2201 input1,
2202 shape,
2203 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002204}
2205
kevmay012b4d88e2019-01-24 14:05:09 +00002206LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002207 armnn::IWorkloadFactory& workloadFactory,
2208 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2209{
2210 unsigned int shape0[] = { 1, 2, 2, 2 };
2211 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2212
2213 unsigned int shape1[] = { 1, 1, 1, 1 };
2214 std::vector<float> input1({ 1 });
2215
kevmay012b4d88e2019-01-24 14:05:09 +00002216 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002217
kevmay012b4d88e2019-01-24 14:05:09 +00002218 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002219 workloadFactory,
2220 memoryManager,
2221 shape0,
2222 input0,
2223 shape1,
2224 input1,
2225 shape0,
2226 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002227}
2228
kevmay012b4d88e2019-01-24 14:05:09 +00002229LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002230 armnn::IWorkloadFactory& workloadFactory,
2231 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2232{
2233 const unsigned int shape0[] = { 1, 2, 2, 3 };
2234 const unsigned int shape1[] = { 1, 1, 1, 3 };
2235
2236 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2237 7, 8, 9, 10, 11, 12 });
2238
2239 std::vector<float> input1({ 1, 2, 3});
2240
kevmay012b4d88e2019-01-24 14:05:09 +00002241 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
2242 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002243
kevmay012b4d88e2019-01-24 14:05:09 +00002244 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002245 workloadFactory,
2246 memoryManager,
2247 shape0,
2248 input0,
2249 shape1,
2250 input1,
2251 shape0,
2252 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002253}
2254
2255LayerTestResult<uint8_t, 4> EqualUint8Test(
2256 armnn::IWorkloadFactory& workloadFactory,
2257 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2258{
2259 unsigned int shape[] = { 2, 2, 2, 2 };
2260
2261 // See dequantized values to the right.
2262 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002263 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002264
2265 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2266 3, 3, 3, 3, 5, 5, 5, 5 });
2267
2268 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2269 1, 1, 1, 1, 0, 0, 0, 0 });
2270
kevmay012b4d88e2019-01-24 14:05:09 +00002271 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2272 armnn::DataType::QuantisedAsymm8,
2273 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002274 workloadFactory,
2275 memoryManager,
2276 shape,
2277 input0,
2278 shape,
2279 input1,
2280 shape,
2281 output,
2282 1.0f,
2283 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002284}
2285
2286LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
2287 armnn::IWorkloadFactory& workloadFactory,
2288 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2289{
2290 const unsigned int shape0[] = { 1, 2, 2, 3 };
2291 const unsigned int shape1[] = { 1, 1, 1, 1 };
2292
2293 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2294 7, 8, 9, 10, 11, 12 });
2295
2296 std::vector<uint8_t> input1({ 1 });
2297
2298 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
2299 0, 0, 0, 0, 0, 0 });
2300
kevmay012b4d88e2019-01-24 14:05:09 +00002301 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2302 armnn::DataType::QuantisedAsymm8,
2303 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002304 workloadFactory,
2305 memoryManager,
2306 shape0,
2307 input0,
2308 shape1,
2309 input1,
2310 shape0,
2311 output,
2312 1.0f,
2313 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002314}
2315
2316LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
2317 armnn::IWorkloadFactory& workloadFactory,
2318 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2319{
2320 const unsigned int shape0[] = { 1, 2, 2, 3 };
2321 const unsigned int shape1[] = { 1, 1, 1, 3 };
2322
2323 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2324 7, 8, 9, 10, 11, 12 });
2325
2326 std::vector<uint8_t> input1({ 1, 1, 3});
2327
2328 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
2329 0, 0, 0, 0, 0, 0 });
2330
kevmay012b4d88e2019-01-24 14:05:09 +00002331 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2332 armnn::DataType::QuantisedAsymm8,
2333 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002334 workloadFactory,
2335 memoryManager,
2336 shape0,
2337 input0,
2338 shape1,
2339 input1,
2340 shape0,
2341 output,
2342 1.0f,
2343 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002344}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002345
kevmay012b4d88e2019-01-24 14:05:09 +00002346LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00002347 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2348{
2349 const unsigned int width = 2;
2350 const unsigned int height = 2;
2351 const unsigned int channelCount = 2;
2352 const unsigned int batchSize = 2;
2353
2354 unsigned int shape[] = { batchSize, channelCount, height, width };
2355
2356 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2357 3, 3, 3, 3, 4, 4, 4, 4 });
2358
2359 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2360 5, 5, 5, 5, 4, 4, 4, 4 });
2361
kevmay012b4d88e2019-01-24 14:05:09 +00002362 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2363 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002364
kevmay012b4d88e2019-01-24 14:05:09 +00002365 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002366 workloadFactory,
2367 memoryManager,
2368 shape,
2369 input0,
2370 shape,
2371 input1,
2372 shape,
2373 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002374}
2375
kevmay012b4d88e2019-01-24 14:05:09 +00002376LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002377 armnn::IWorkloadFactory& workloadFactory,
2378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2379{
2380 unsigned int shape0[] = { 1, 2, 2, 2 };
2381 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2382
2383 unsigned int shape1[] = { 1, 1, 1, 1 };
2384 std::vector<float> input1({ 1 });
2385
kevmay012b4d88e2019-01-24 14:05:09 +00002386 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00002387
kevmay012b4d88e2019-01-24 14:05:09 +00002388 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002389 workloadFactory,
2390 memoryManager,
2391 shape0,
2392 input0,
2393 shape1,
2394 input1,
2395 shape0,
2396 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002397}
2398
kevmay012b4d88e2019-01-24 14:05:09 +00002399LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002400 armnn::IWorkloadFactory& workloadFactory,
2401 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2402{
2403 const unsigned int shape0[] = { 1, 2, 2, 3 };
2404 const unsigned int shape1[] = { 1, 1, 1, 3 };
2405
2406 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
2407 7, 8, 9, 10, 11, 12 });
2408
2409 std::vector<float> input1({ 1, 3, 2});
2410
kevmay012b4d88e2019-01-24 14:05:09 +00002411 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
2412 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002413
kevmay012b4d88e2019-01-24 14:05:09 +00002414 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002415 workloadFactory,
2416 memoryManager,
2417 shape0,
2418 input0,
2419 shape1,
2420 input1,
2421 shape0,
2422 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002423}
2424
2425LayerTestResult<uint8_t, 4> GreaterUint8Test(
2426 armnn::IWorkloadFactory& workloadFactory,
2427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2428{
2429 unsigned int shape[] = { 2, 2, 2, 2 };
2430
2431 // See dequantized values to the right.
2432 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2433 3, 3, 3, 3, 5, 5, 5, 5 });
2434
2435 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2436 2, 2, 2, 2, 5, 5, 5, 5 });
2437
2438 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
2439 1, 1, 1, 1, 0, 0, 0, 0 });
2440
kevmay012b4d88e2019-01-24 14:05:09 +00002441 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2442 armnn::DataType::QuantisedAsymm8,
2443 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002444 workloadFactory,
2445 memoryManager,
2446 shape,
2447 input0,
2448 shape,
2449 input1,
2450 shape,
2451 output,
2452 1.0f,
2453 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002454}
2455
2456LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
2457 armnn::IWorkloadFactory& workloadFactory,
2458 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2459{
2460 const unsigned int shape0[] = { 1, 2, 2, 3 };
2461 const unsigned int shape1[] = { 1, 1, 1, 1 };
2462
2463 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2464 7, 8, 9, 10, 11, 12 });
2465
2466 std::vector<uint8_t> input1({ 1 });
2467
2468 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
2469 1, 1, 1, 1, 1, 1 });
2470
kevmay012b4d88e2019-01-24 14:05:09 +00002471 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2472 armnn::DataType::QuantisedAsymm8,
2473 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002474 workloadFactory,
2475 memoryManager,
2476 shape0,
2477 input0,
2478 shape1,
2479 input1,
2480 shape0,
2481 output,
2482 1.0f,
2483 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002484}
2485
2486LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
2487 armnn::IWorkloadFactory& workloadFactory,
2488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2489{
2490 const unsigned int shape0[] = { 1, 2, 2, 3 };
2491 const unsigned int shape1[] = { 1, 1, 1, 3 };
2492
2493 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2494 7, 8, 9, 10, 11, 12 });
2495
2496 std::vector<uint8_t> input1({ 1, 1, 3});
2497
2498 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
2499 1, 1, 1, 1, 1, 1 });
2500
kevmay012b4d88e2019-01-24 14:05:09 +00002501 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2502 armnn::DataType::QuantisedAsymm8,
2503 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002504 workloadFactory,
2505 memoryManager,
2506 shape0,
2507 input0,
2508 shape1,
2509 input1,
2510 shape0,
2511 output,
2512 1.0f,
2513 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002514}
2515
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002516LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2517 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2518{
2519 const unsigned int width = 2;
2520 const unsigned int height = 2;
2521 const unsigned int channelCount = 2;
2522 const unsigned int batchSize = 2;
2523
2524 unsigned int shape[] = { batchSize, channelCount, height, width };
2525
2526 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2527 3, 3, 3, 3, 4, 4, 4, 4 });
2528
2529 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2530 4, 4, 4, 4, 5, 5, 5, 5 });
2531
2532 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
2533 4, 4, 4, 4, 5, 5, 5, 5 });
2534
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002535 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2536 workloadFactory,
2537 memoryManager,
2538 shape,
2539 input0,
2540 shape,
2541 input1,
2542 shape,
2543 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002544}
2545
2546LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
2547 armnn::IWorkloadFactory& workloadFactory,
2548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2549{
2550 unsigned int shape0[] = { 1, 2, 2, 2 };
2551 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2552
2553 unsigned int shape1[] = { 1, 1, 1, 1 };
2554 std::vector<float> input1({ 2 });
2555
2556 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
2557
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002558 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2559 workloadFactory,
2560 memoryManager,
2561 shape0,
2562 input0,
2563 shape1,
2564 input1,
2565 shape0,
2566 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002567}
2568
2569LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
2570 armnn::IWorkloadFactory& workloadFactory,
2571 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2572{
2573 const unsigned int shape0[] = { 1, 2, 2, 3 };
2574 const unsigned int shape1[] = { 1, 1, 1, 3 };
2575
2576 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2577 7, 8, 9, 10, 11, 12 });
2578
2579 std::vector<float> input1({ 1, 2, 3});
2580
2581 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002582 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002583
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002584 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2585 workloadFactory,
2586 memoryManager,
2587 shape0,
2588 input0,
2589 shape1,
2590 input1,
2591 shape0,
2592 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002593}
2594
2595LayerTestResult<uint8_t, 4> MaximumUint8Test(
2596 armnn::IWorkloadFactory& workloadFactory,
2597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2598{
2599 unsigned int shape[] = { 2, 2, 2, 2 };
2600
2601 // See dequantized values to the right.
2602 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2603 3, 3, 3, 3, 4, 4, 4, 4 });
2604
2605 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2606 4, 4, 4, 4, 5, 5, 5, 5 });
2607
2608 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2609 4, 4, 4, 4, 5, 5, 5, 5 });
2610
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002611 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2612 workloadFactory,
2613 memoryManager,
2614 shape,
2615 input0,
2616 shape,
2617 input1,
2618 shape,
2619 output,
2620 1.0f,
2621 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002622}
2623
2624LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
2625 armnn::IWorkloadFactory& workloadFactory,
2626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2627{
2628 const unsigned int shape0[] = { 1, 2, 2, 3 };
2629 const unsigned int shape1[] = { 1, 1, 1, 1 };
2630
2631 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2632 7, 8, 9, 10, 11, 12 });
2633
2634 std::vector<uint8_t> input1({2});
2635
2636 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
2637 7, 8, 9, 10, 11, 12 });
2638
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002639 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2640 workloadFactory,
2641 memoryManager,
2642 shape0,
2643 input0,
2644 shape1,
2645 input1,
2646 shape0,
2647 output,
2648 1.0f,
2649 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002650}
2651
2652LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
2653 armnn::IWorkloadFactory& workloadFactory,
2654 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2655{
2656 const unsigned int shape0[] = { 1, 2, 2, 3 };
2657 const unsigned int shape1[] = { 1, 1, 1, 3 };
2658
2659 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2660 7, 8, 9, 10, 11, 12 });
2661
2662 std::vector<uint8_t> input1({ 1, 10, 3});
2663
2664 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
2665 7, 10, 9, 10, 11, 12 });
2666
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002667 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2668 workloadFactory,
2669 memoryManager,
2670 shape0,
2671 input0,
2672 shape1,
2673 input1,
2674 shape0,
2675 output,
2676 1.0f,
2677 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002678}
2679
Sadik Armagan2999a022019-04-09 14:20:12 +01002680LayerTestResult<int16_t, 4> MaximumInt16Test(
2681 armnn::IWorkloadFactory& workloadFactory,
2682 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2683{
2684 unsigned int shape[] = { 2, 2, 2, 2 };
2685
2686 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2687 3, 3, 3, 3, 4, 4, 4, 4 });
2688
2689 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2690 4, 4, 4, 4, 5, 5, 5, 5 });
2691
2692 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2693 4, 4, 4, 4, 5, 5, 5, 5 });
2694
2695 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2696 workloadFactory,
2697 memoryManager,
2698 shape,
2699 input0,
2700 shape,
2701 input1,
2702 shape,
2703 output,
2704 1.0f,
2705 0);
2706}
2707
2708LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
2709 armnn::IWorkloadFactory& workloadFactory,
2710 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2711{
2712 const unsigned int shape0[] = { 1, 2, 2, 3 };
2713 const unsigned int shape1[] = { 1, 1, 1, 1 };
2714
2715 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2716 7, 8, 9, 10, 11, 12 });
2717
2718 std::vector<int16_t> input1({2});
2719
2720 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
2721 7, 8, 9, 10, 11, 12 });
2722
2723 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2724 workloadFactory,
2725 memoryManager,
2726 shape0,
2727 input0,
2728 shape1,
2729 input1,
2730 shape0,
2731 output,
2732 1.0f,
2733 0);
2734}
2735
2736LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
2737 armnn::IWorkloadFactory& workloadFactory,
2738 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2739{
2740 const unsigned int shape0[] = { 1, 2, 2, 3 };
2741 const unsigned int shape1[] = { 1, 1, 1, 3 };
2742
2743 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2744 7, 8, 9, 10, 11, 12 });
2745
2746 std::vector<int16_t> input1({ 1, 10, 3});
2747
2748 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
2749 7, 10, 9, 10, 11, 12 });
2750
2751 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2752 workloadFactory,
2753 memoryManager,
2754 shape0,
2755 input0,
2756 shape1,
2757 input1,
2758 shape0,
2759 output,
2760 1.0f,
2761 0);
2762}
2763
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002764LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
2765 armnn::IWorkloadFactory& workloadFactory,
2766 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2767{
2768 unsigned int shape0[] = { 1, 2, 2, 2 };
2769 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2770
2771 unsigned int shape1[] = { 1, 1, 1, 1 };
2772 std::vector<float> input1({ 2 });
2773
2774 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
2775
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002776 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2777 workloadFactory,
2778 memoryManager,
2779 shape0,
2780 input0,
2781 shape1,
2782 input1,
2783 shape0,
2784 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002785}
2786
2787
2788LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
2789 armnn::IWorkloadFactory& workloadFactory,
2790 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2791{
2792 unsigned int shape0[] = { 1, 2, 2, 2 };
2793 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
2794
2795 unsigned int shape1[] = { 1, 1, 1, 1 };
2796 std::vector<float> input1({ 5 });
2797
2798 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
2799
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002800 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2801 workloadFactory,
2802 memoryManager,
2803 shape0,
2804 input0,
2805 shape1,
2806 input1,
2807 shape0,
2808 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002809}
2810
2811LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
2812 armnn::IWorkloadFactory & workloadFactory,
2813 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
2814{
2815 const unsigned int shape0[] = { 1, 2, 2, 3 };
2816 const unsigned int shape1[] = { 1, 1, 1, 3 };
2817
2818 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
2819 7, 1, 2, 3, 4, 5 });
2820
2821 std::vector<uint8_t> input1({ 1, 2, 3});
2822
2823 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
2824 1, 1, 2, 1, 2, 3 });
2825
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002826 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2827 workloadFactory,
2828 memoryManager,
2829 shape0,
2830 input0,
2831 shape1,
2832 input1,
2833 shape0,
2834 output,
2835 1.0f,
2836 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002837}
2838
Sadik Armagan2999a022019-04-09 14:20:12 +01002839LayerTestResult<int16_t, 4> MinimumInt16Test(
2840 armnn::IWorkloadFactory& workloadFactory,
2841 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2842{
2843 unsigned int shape[] = { 2, 2, 2, 2 };
2844
2845 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2846 3, 3, 3, 3, 4, 4, 4, 4 });
2847
2848 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2849 4, 4, 4, 4, 5, 5, 5, 5 });
2850
2851 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
2852 3, 3, 3, 3, 4, 4, 4, 4 });
2853
2854 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2855 workloadFactory,
2856 memoryManager,
2857 shape,
2858 input0,
2859 shape,
2860 input1,
2861 shape,
2862 output,
2863 1.0f,
2864 0);
2865}
2866
2867LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
2868 armnn::IWorkloadFactory& workloadFactory,
2869 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2870{
2871 const unsigned int shape0[] = { 1, 2, 2, 3 };
2872 const unsigned int shape1[] = { 1, 1, 1, 1 };
2873
2874 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2875 7, 8, 9, 10, 11, 12 });
2876
2877 std::vector<int16_t> input1({2});
2878
2879 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
2880 2, 2, 2, 2, 2, 2 });
2881
2882 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2883 workloadFactory,
2884 memoryManager,
2885 shape0,
2886 input0,
2887 shape1,
2888 input1,
2889 shape0,
2890 output,
2891 1.0f,
2892 0);
2893}
2894
2895LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
2896 armnn::IWorkloadFactory& workloadFactory,
2897 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2898{
2899 const unsigned int shape0[] = { 1, 2, 2, 3 };
2900 const unsigned int shape1[] = { 1, 1, 1, 3 };
2901
2902 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2903 7, 8, 9, 10, 11, 12 });
2904
2905 std::vector<int16_t> input1({ 1, 10, 3});
2906
2907 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
2908 1, 8, 3, 1, 10, 3 });
2909
2910 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2911 workloadFactory,
2912 memoryManager,
2913 shape0,
2914 input0,
2915 shape1,
2916 input1,
2917 shape0,
2918 output,
2919 1.0f,
2920 0);
2921}
2922
namespace {
// Runs a Multiplication workload on the given factory with two input tensors
// and packages the computed result together with the expected output so the
// calling test harness can compare them.
// shape0/shape1/outShape are 4d shapes; values0/values1/outValues hold the
// corresponding tensor data in row-major order.
// NOTE(review): memoryManager is unused in this body; it is kept so the
// helper matches the common layer-test signature.
LayerTestResult<float,4> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<float> & values0,
    const unsigned int shape1[4],
    const std::vector<float> & values1,
    const unsigned int outShape[4],
    const std::vector<float> & outValues)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the workload: two inputs, one output.
    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Tensor memory must be allocated before any data can be copied in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after allocation, before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // The caller-supplied outValues become the expected side of the result.
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace
2972
2973
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002974LayerTestResult<float,4> MultiplicationTest(
2975 armnn::IWorkloadFactory& workloadFactory,
2976 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002977{
2978 const unsigned int width = 2;
2979 const unsigned int height = 2;
2980 const unsigned int channelCount = 2;
2981 const unsigned int batchSize = 2;
2982
2983 unsigned int shape[] = { batchSize, channelCount, height, width };
2984
2985 std::vector<float> input0({
2986 1, 1, 1, 1, 2, 2, 2, 2,
2987 3, 3, 3, 3, 4, 4, 4, 4 });
2988
2989 std::vector<float> input1({
2990 2, 2, 2, 2, 3, 3, 3, 3,
2991 4, 4, 4, 4, 5, 5, 5, 5 });
2992
2993 std::vector<float> output({
2994 2, 2, 2, 2, 6, 6, 6, 6,
2995 12, 12, 12, 12, 20, 20, 20, 20 });
2996
2997 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002998 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002999 shape,
3000 input0,
3001 shape,
3002 input1,
3003 shape,
3004 output);
3005}
3006
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003007LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
3008 armnn::IWorkloadFactory& workloadFactory,
3009 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003010{
3011 unsigned int shape0[] = { 1, 2, 2, 2 };
3012 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3013
3014 unsigned int shape1[] = { 1, 1, 1, 1 };
3015 std::vector<float> input1({ 2 });
3016
3017 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
3018
3019 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003020 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003021 shape0,
3022 input0,
3023 shape1,
3024 input1,
3025 shape0,
3026 output);
3027}
3028
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003029LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
3030 armnn::IWorkloadFactory& workloadFactory,
3031 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003032{
3033 unsigned int shape0[] = { 1, 3, 3, 2 };
3034 std::vector<float> input0({
3035 1, 2, 3, 4, 5, 6,
3036 7, 8, 9, 10, 11, 12,
3037 13, 14, 15, 16, 17, 18});
3038
3039 unsigned int shape1[] = { 1, 1, 1, 2 };
3040 std::vector<float> input1({ 1, 2 });
3041
3042 std::vector<float> output({
3043 1, 4, 3, 8, 5, 12,
3044 7, 16, 9, 20, 11, 24,
3045 13, 28, 15, 32, 17, 36});
3046
3047 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003048 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003049 shape0,
3050 input0,
3051 shape1,
3052 input1,
3053 shape0,
3054 output);
3055}
telsoa014fcda012018-03-09 14:13:49 +00003056
// Runs the same Multiplication workload on two factories - the backend under
// test and a reference backend - with identical random inputs, and returns
// both outputs so the caller can compare them (output = backend under test,
// outputExpected = reference).
// NOTE(review): memoryManager is unused in this body; it is kept so the
// function matches the common layer-test signature.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds make the "random" inputs reproducible across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference workload shares the same descriptor/info, rebound to the
    // reference factory's tensor handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both pipelines are fed identical input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
3126
// Runs the same BatchNormalization workload on the backend under test and a
// reference backend with identical random input/mean/variance/beta/gamma,
// and returns both outputs for comparison (output = backend under test,
// outputExpected = reference).
// NOTE(review): memoryManager is unused in this body; it is kept so the
// function matches the common layer-test signature.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    // The per-channel parameter tensors (mean/variance/beta/gamma) are 1d.
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds make the "random" data reproducible across runs.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    // Extra 0.0f argument presumably bounds the values so variance stays
    // non-negative - TODO confirm against MakeRandomTensor's signature.
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference workload shares the same descriptor (including the
    // parameter tensors) but uses the reference factory's handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both pipelines are fed identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
3209
// Runs a Permute workload to rearrange inputData according to 'mappings'.
// On return, outputData holds the permuted data and inputTensorInfo is
// updated in place to the permuted tensor info, so the caller can chain
// further operations on the result.
// NOTE(review): memoryManager is unused in this body; it is kept so the
// helper matches the common layer-test signature.
template<typename T>
void PermuteTensorData(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::PermutationVector& mappings,
    armnn::TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    // Allocate before copying data in; configure after allocation.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Reflect the permutation back to the caller via the in/out parameter.
    inputTensorInfo = outputTensorInfo;
}
3253
3254armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
3255 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3256 unsigned int concatDim)
3257{
telsoa014fcda012018-03-09 14:13:49 +00003258 std::vector<armnn::TensorShape> shapes;
3259 shapes.reserve(inputTensorInfos.size());
3260 for (const armnn::TensorInfo& it: inputTensorInfos)
3261 {
3262 shapes.push_back(it.GetShape());
3263 }
surmeh013537c2c2018-05-18 16:31:43 +01003264
3265 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
3266 shapes.end(),
3267 concatDim);
3268}
3269
3270//
// Concatenation is only supported for N and C dimensions for NCHW and the innermost dimension.
// In case of <4 dimensions we need to make sure that the concat dimensions are at least
// the 3rd slowest iterating one or the innermost dimension.
surmeh013537c2c2018-05-18 16:31:43 +01003274//
3275
3276bool NeedPermuteForConcat(
3277 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3278 unsigned int concatDim)
3279{
3280 // See note above. Additionally we expect the input shapes to have the
3281 // same number of dimensions.
3282 unsigned int nDimensions = 0;
3283
telsoa01c577f2c2018-08-31 09:22:23 +01003284 // Determine the number of dimensions as well as sanity check them
3285 // agains test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01003286 for (auto && tensorInfo : inputTensorInfos)
3287 {
3288 if (!nDimensions)
3289 {
3290 nDimensions = tensorInfo.GetShape().GetNumDimensions();
3291 }
3292 else
3293 {
3294 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
3295 "Input shapes must have the same number of dimensions");
3296 }
3297 }
3298
narpra015cdda352018-11-19 15:30:27 +00003299 return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
surmeh013537c2c2018-05-18 16:31:43 +01003300}
3301
3302armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
3303{
3304 unsigned int numDims = inputShape.GetNumDimensions();
3305 if (numDims >= 3)
3306 {
3307 // Nothing to do if the inputShape has at least 3 dimensions.
3308 return inputShape;
3309 }
3310
3311 std::vector<unsigned int> newDims(size_t(3), 1u);
3312 unsigned int expandedBy = 3 - numDims;
3313 for (unsigned int i=0; i<numDims; ++i)
3314 {
3315 newDims[expandedBy+i] = inputShape[i];
3316 }
3317 return armnn::TensorShape(3u, &newDims[0]);
3318}
3319
3320void Generate3dPermuteVectorForConcat(
3321 unsigned int numDimensions,
3322 unsigned int & concatDim,
3323 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
3324{
3325 BOOST_ASSERT_MSG(numDimensions <= 3,
3326 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01003327 unsigned int expandedBy = 3 - numDimensions;
3328 unsigned int expandedConcatAxis = concatDim + expandedBy;
3329
3330 if (expandedConcatAxis == 2)
3331 {
3332 concatDim = 0;
3333 armnn::PermutationVector forwardPermutation({1, 2, 0});
3334 armnn::PermutationVector reversePermutation({2, 0, 1});
3335 permutations = std::make_pair(forwardPermutation, reversePermutation);
3336 }
3337 else if (expandedConcatAxis == 1)
3338 {
3339 concatDim = 0;
3340 armnn::PermutationVector forwardPermutation({2, 0, 1});
3341 armnn::PermutationVector reversePermutation({1, 2, 0});
3342 permutations = std::make_pair(forwardPermutation, reversePermutation);
3343 }
3344 else
3345 {
3346 BOOST_ASSERT(expandedConcatAxis == 0);
3347 concatDim = 0;
3348 }
3349}
3350
3351//
3352// Permute the input tensors so we can do a supported concatenation.
3353// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
3354// at the front. Finally this function tells what the output shape
3355// of the permuted concatenated tensor is going to be.
3356//
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,     // in/out: replaced by expanded+permuted infos
    std::vector<T *> & inputData,                          // in/out: repointed at the permuted copies
    std::vector<std::vector<T>> & inputDataStorage,        // out: owns the permuted data buffers
    armnn::PermutationVector & permuteVector,              // out: reverse permutation for the output
    unsigned int & concatDim,                              // in/out: rewritten to the post-permutation axis
    armnn::TensorInfo & outputTensorInfo)                  // in/out: shape rewritten to the permuted shape
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
                     "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input: derive the forward/reverse permutations from its
            // rank (all inputs must share it, asserted below).
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                             "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                             "All inputs must have the same number of dimensions");
        }

        // Expand to 3d before permuting so all shapes are rank 3.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        // Runs a Permute workload; updates newTensorInfo in place and fills
        // the storage buffer with the permuted data.
        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    // The expected output shape must undergo the same forward permutation.
    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
3419
3420
3421//
3422// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01003423// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01003424// output.
3425//
// Copies the concatenated result out of inputDataHandle, applies the reverse
// permutation (permuteVector) via a Permute workload, and writes the final
// values into 'data' so they can be compared against the expected output.
template <typename T>
void PermuteOutputForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo & tensorInfo,
    const armnn::PermutationVector & permuteVector,
    std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,  // consumed: handle holding the raw concat result
    T * data)                                                  // out: receives the permuted result
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    // resultTensorInfo is updated in place to the permuted info; outputData
    // receives the permuted values.
    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}
3459
3460template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003461void Concatenate(
3462 armnn::IWorkloadFactory& workloadFactory,
3463 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3464 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
3465 std::initializer_list<T *> inputsOrig,
3466 const armnn::TensorInfo& outputTensorInfoOrig,
3467 T * output,
narpra015cdda352018-11-19 15:30:27 +00003468 unsigned int concatDim,
3469 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01003470{
3471 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
3472 if (output == nullptr)
3473 {
3474 // Nullptr is an error in the test. By returning without doing the permutation
3475 // I expect the caller to fail the test. It still makes sense to report this as
3476 // an assert for Debug builds.
3477 return;
3478 }
3479
telsoa01c577f2c2018-08-31 09:22:23 +01003480 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01003481 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
3482 std::vector<T *> inputs = inputsOrig;
3483 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
3484
3485 armnn::PermutationVector permuteVector{0, 1, 2};
3486
telsoa01c577f2c2018-08-31 09:22:23 +01003487 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01003488 std::vector<std::vector<T>> tmpInputDataStorage;
3489
3490 const size_t inputCount = inputTensorInfos.size();
3491
3492 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
3493
3494 if (needPermuteForConcat)
3495 {
3496 //
3497 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01003498 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01003499 //
3500 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003501 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003502 inputTensorInfos,
3503 inputs,
3504 tmpInputDataStorage,
3505 permuteVector,
3506 concatDim,
3507 outputTensorInfo);
3508 }
3509
narpra015cdda352018-11-19 15:30:27 +00003510 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00003511
3512 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
3513 inputHandles.reserve(inputCount);
3514
narpra015cdda352018-11-19 15:30:27 +00003515 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3516
3517 armnn::MergerQueueDescriptor queueDescriptor;
3518 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
3519 queueDescriptor.m_Parameters = viewsDescriptor;
3520
3521 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003522 {
narpra015cdda352018-11-19 15:30:27 +00003523 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
3524 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
3525 {
3526 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
3527 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
3528 }
telsoa014fcda012018-03-09 14:13:49 +00003529
narpra015cdda352018-11-19 15:30:27 +00003530 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00003531
narpra015cdda352018-11-19 15:30:27 +00003532 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
3533 for (unsigned int i = 0; i < inputCount; ++i)
3534 {
3535 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
3536 std::unique_ptr<armnn::ITensorHandle> inputHandle =
3537 subTensorsSupported ?
3538 workloadFactory.CreateSubTensorHandle(*outputHandle,
3539 inputTensorInfo.GetShape(),
3540 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
3541 workloadFactory.CreateTensorHandle(inputTensorInfo);
3542
3543 inputHandles.emplace_back(std::move(inputHandle));
3544 }
3545
telsoa014fcda012018-03-09 14:13:49 +00003546 }
narpra015cdda352018-11-19 15:30:27 +00003547 else
3548 {
3549 for (unsigned int i = 0; i < inputCount; ++i)
3550 {
3551 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
3552 inputHandles.emplace_back(std::move(inputHandle));
3553 }
3554 }
telsoa014fcda012018-03-09 14:13:49 +00003555
3556 for (unsigned int i = 0; i < inputCount; ++i)
3557 {
surmeh013537c2c2018-05-18 16:31:43 +01003558 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00003559 }
3560
3561 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
3562
3563 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
3564
3565 for (auto& inputHandle : inputHandles)
3566 {
3567 inputHandle->Allocate();
3568 }
3569
3570 outputHandle->Allocate();
3571
3572 unsigned int nextInputId = 0;
3573 for (auto& inputHandle : inputHandles)
3574 {
surmeh013537c2c2018-05-18 16:31:43 +01003575 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
3576 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00003577 }
3578
Derek Lambertif30f7d32019-04-09 10:25:02 +01003579 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003580 workload->Execute();
3581
surmeh013537c2c2018-05-18 16:31:43 +01003582 if (needPermuteForConcat)
3583 {
3584 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003585 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003586 outputTensorInfo,
3587 permuteVector,
3588 std::move(outputHandle),
3589 output);
3590 }
3591 else
3592 {
3593 CopyDataFromITensorHandle(output, outputHandle.get());
3594 }
telsoa014fcda012018-03-09 14:13:49 +00003595}
3596
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003597template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003598LayerTestResult<T, 1> Concatenation1dTestImpl(
3599 armnn::IWorkloadFactory& workloadFactory,
3600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3601 float qScale,
3602 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003603{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003604 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003605
3606 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
3607 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
3608 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
3609
Jim Flynncbb66aa2019-05-15 13:03:54 +01003610 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003611
3612 LayerTestResult<T, 1> result(outputTensorInfo);
3613
3614 std::vector<T> output;
3615 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003616 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003617 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
3618 { input0.data(), input1.data(), input2.data() },
3619 outputTensorInfo,
3620 output.data(),
3621 0,
3622 true);
telsoa014fcda012018-03-09 14:13:49 +00003623
3624 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
3625 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3626 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
3627 }));
3628
3629 return result;
3630}
3631
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003632LayerTestResult<float, 1> Concatenation1dTest(
3633 armnn::IWorkloadFactory& workloadFactory,
3634 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003635{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003636 return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003637}
3638
// Concatenates three identical 2x3 inputs along 'dimension' and returns the
// actual output in result.output; callers fill in result.outputExpected.
// qScale/qOffset are applied to all tensor infos and data for quantized types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Last argument 'true' enables the sub-tensor path in the Concatenate helper.
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}
3689
// Validates 2D concatenation along dimension 0: three 2x3 inputs stack
// batch-wise into a 6x3 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);

    // Expected: all rows of input0, then input1, then input2, in order.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3724
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003725LayerTestResult<float, 2> Concatenation2dDim0Test(
3726 armnn::IWorkloadFactory& workloadFactory,
3727 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003728{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003729 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003730}
3731
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003732template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003733LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
3734 armnn::IWorkloadFactory& workloadFactory,
3735 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3736 float qScale,
3737 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003738{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003739 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003740
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003741 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
3742 workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
3743
telsoa014fcda012018-03-09 14:13:49 +00003744 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3745 // Batch 0
3746 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
3747
3748 // Batch 1
3749 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
3750 }));
3751
3752 return result;
3753}
3754
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003755LayerTestResult<float, 2> Concatenation2dDim1Test(
3756 armnn::IWorkloadFactory& workloadFactory,
3757 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003758{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003759 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003760}
3761
// Validates 2D concatenation along dimension 0 with inputs of DIFFERENT batch
// counts (2, 3 and 1 rows), producing a 6x3 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 0 with the sub-tensor path enabled.
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3832
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003833LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
3834 armnn::IWorkloadFactory& workloadFactory,
3835 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003836{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003837 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
3838 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003839}
3840
// Validates 2D concatenation along dimension 1 with inputs of DIFFERENT widths
// (3, 5 and 1 columns), producing a 2x9 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 1 with the sub-tensor path enabled.
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3899
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003900LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
3901 armnn::IWorkloadFactory& workloadFactory,
3902 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003903{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003904 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
3905 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003906}
3907
// Concatenates three identical 2x3x2 inputs along 'dimension' and returns the
// actual output in result.output; callers fill in result.outputExpected.
// 'useSubtensor' toggles the sub-tensor path in the Concatenate helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
3995
// Validates 3D concatenation along dimension 0: three 2x3x2 inputs stack
// batch-wise into a 6x3x2 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    // Expected: batches of input0, then input1, then input2, in order.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
4066
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004067LayerTestResult<float, 3> Concatenation3dDim0Test(
4068 armnn::IWorkloadFactory& workloadFactory,
4069 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004070{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004071 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004072}
4073
// Validates 3D concatenation along dimension 1: three 2x3x2 inputs are joined
// channel-wise into a 2x9x2 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    // Expected: per batch, channels of input0, then input1, then input2.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
4144
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004145LayerTestResult<float, 3> Concatenation3dDim1Test(
4146 armnn::IWorkloadFactory& workloadFactory,
4147 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004148{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004149 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004150}
4151
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004152template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004153LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
4154 armnn::IWorkloadFactory& workloadFactory,
4155 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004156 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004157 float qScale,
4158 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004159{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004160 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004161
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004162 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4163 workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004164
telsoa014fcda012018-03-09 14:13:49 +00004165 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4166 // Batch 0, Channel 0
4167 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
4168
4169 // Batch 0, Channel 1
4170 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
4171
4172 // Batch 0, Channel 2
4173 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
4174
4175 // Batch 1, Channel 0
4176 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
4177
4178 // Batch 1, Channel 1
4179 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
4180
4181 // Batch 1, Channel 2
4182 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
4183 }));
4184
4185 return result;
4186}
4187
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004188LayerTestResult<float, 3> Concatenation3dDim2Test(
4189 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004190 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4191 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004192{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004193 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
4194 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004195}
4196
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004197template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004198LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
4199 armnn::IWorkloadFactory& workloadFactory,
4200 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4201 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004202 int32_t qOffset)
4203{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004204 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004205 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4206 // Batch 0, Channel 0
4207 1.0f, 2.0f,
4208
4209 // Batch 0, Channel 1
4210 3.0f, 4.0f,
4211
4212 // Batch 0, Channel 2
4213 5.0f, 6.0f,
4214
4215 // Batch 1, Channel 0
4216 19.0f, 20.0f,
4217
4218 // Batch 1, Channel 1
4219 21.0f, 22.0f,
4220
4221 // Batch 1, Channel 2
4222 23.0f, 24.0f
4223 }));
4224
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004225 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004226 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4227 // Batch 0, Channel 0
4228 7.0f, 8.0f,
4229
4230 // Batch 0, Channel 1
4231 9.0f, 10.0f,
4232
4233 // Batch 0, Channel 2
4234 11.0f, 12.0f,
4235 }));
4236
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004237 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004238 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4239 // Batch 0, Channel 0
4240 25.0f, 26.0f,
4241
4242 // Batch 0, Channel 1
4243 27.0f, 28.0f,
4244
4245 // Batch 0, Channel 2
4246 29.0f, 30.0f,
4247
4248 // Batch 1, Channel 0
4249 13.0f, 14.0f,
4250
4251 // Batch 1, Channel 1
4252 15.0f, 16.0f,
4253
4254 // Batch 1, Channel 2
4255 17.0f, 18.0f,
4256
4257 // Batch 2, Channel 0
4258 31.0f, 32.0f,
4259
4260 // Batch 2, Channel 1
4261 33.0f, 34.0f,
4262
4263 // Batch 2, Channel 2
4264 35.0f, 36.0f
4265 }));
4266
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004267 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004268 LayerTestResult<T, 3> result(outputTensorInfo);
4269
4270 std::vector<T> output;
4271 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004272 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004273 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4274 { input0.data(), input1.data(), input2.data() },
4275 outputTensorInfo,
4276 output.data(),
4277 0,
4278 true);
telsoa014fcda012018-03-09 14:13:49 +00004279
4280 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4281 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4282 // Batch 0, Channel 0
4283 1.0f, 2.0f,
4284
4285 // Batch 0, Channel 1
4286 3.0f, 4.0f,
4287
4288 // Batch 0, Channel 2
4289 5.0f, 6.0f,
4290
4291 // Batch 1, Channel 0
4292 19.0f, 20.0f,
4293
4294 // Batch 1, Channel 1
4295 21.0f, 22.0f,
4296
4297 // Batch 1, Channel 2
4298 23.0f, 24.0f,
4299
4300 // Batch 2, Channel 0
4301 7.0f, 8.0f,
4302
4303 // Batch 2, Channel 1
4304 9.0f, 10.0f,
4305
4306 // Batch 2, Channel 2
4307 11.0f, 12.0f,
4308
4309 // Batch 3, Channel 0
4310 25.0f, 26.0f,
4311
4312 // Batch 3, Channel 1
4313 27.0f, 28.0f,
4314
4315 // Batch 3, Channel 2
4316 29.0f, 30.0f,
4317
4318 // Batch 4, Channel 0
4319 13.0f, 14.0f,
4320
4321 // Batch 4, Channel 1
4322 15.0f, 16.0f,
4323
4324 // Batch 4, Channel 2
4325 17.0f, 18.0f,
4326
4327 // Batch 5, Channel 0
4328 31.0f, 32.0f,
4329
4330 // Batch 5, Channel 1
4331 33.0f, 34.0f,
4332
4333 // Batch 5, Channel 2
4334 35.0f, 36.0f
4335 }));
4336
4337 return result;
4338}
4339
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004340LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
4341 armnn::IWorkloadFactory& workloadFactory,
4342 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004343{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004344 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4345 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004346}
4347
// Validates 3D concatenation along dimension 1 with inputs of DIFFERENT
// channel counts (3, 4 and 1), producing a 2x8x2 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 1 with the sub-tensor path enabled.
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
4478
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004479LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
4480 armnn::IWorkloadFactory& workloadFactory,
4481 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004482{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004483 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4484 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004485}
4486
// Validates 3D concatenation along dimension 2 with inputs of DIFFERENT
// innermost widths (2, 1 and 3), producing a 2x3x6 output. 'useSubtensor'
// toggles the sub-tensor path in the Concatenate helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 2 (innermost axis).
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
4594
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004595LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
4596 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4598 bool useSubtensor)
4599{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004600 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
4601 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004602}
4603
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004604template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004605LayerTestResult<T, 4> Concatenation4dTestImpl(
4606 armnn::IWorkloadFactory& workloadFactory,
4607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4608 const armnn::TensorInfo& outputTensorInfo,
4609 unsigned int dimension,
4610 bool useSubtensor,
4611 float qScale,
4612 int32_t qOffset)
4613{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004614 armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004615
4616 auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4617 1.0f, 2.0f,
4618 3.0f, 4.0f,
4619 5.0f, 6.0f,
4620 7.0f, 8.0f,
4621 9.0f, 10.0f,
4622 11.0f, 12.0f
4623 }));
4624
4625 auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4626 11.0f, 12.0f,
4627 13.0f, 14.0f,
4628 15.0f, 16.0f,
4629 17.0f, 18.0f,
4630 19.0f, 20.0f,
4631 21.0f, 22.0f
4632 }));
4633
4634 auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4635 21.0f, 22.0f,
4636 23.0f, 24.0f,
4637 25.0f, 26.0f,
4638 27.0f, 28.0f,
4639 29.0f, 30.0f,
4640 31.0f, 32.0f
4641 }));
4642
4643 LayerTestResult<T, 4> result(outputTensorInfo);
4644
4645 std::vector<T> output;
4646 output.resize(outputTensorInfo.GetNumElements());
4647
4648 Concatenate<T>(workloadFactory,
4649 memoryManager,
4650 {inputTensorInfo, inputTensorInfo, inputTensorInfo},
4651 {input0.data(), input1.data(), input2.data()},
4652 outputTensorInfo,
4653 output.data(),
4654 dimension,
4655 useSubtensor);
4656
4657 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4658 return result;
4659}
4660
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004661template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004662LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
4663 armnn::IWorkloadFactory& workloadFactory,
4664 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4665 float qScale,
4666 int32_t qOffset)
4667{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004668 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004669
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004670 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4671 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
4672
narpra015cdda352018-11-19 15:30:27 +00004673 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4674 1.0f, 2.0f,
4675 3.0f, 4.0f,
4676 5.0f, 6.0f,
4677 7.0f, 8.0f,
4678 9.0f, 10.0f,
4679 11.0f, 12.0f,
4680
4681 11.0f, 12.0f,
4682 13.0f, 14.0f,
4683 15.0f, 16.0f,
4684 17.0f, 18.0f,
4685 19.0f, 20.0f,
4686 21.0f, 22.0f,
4687
4688 21.0f, 22.0f,
4689 23.0f, 24.0f,
4690 25.0f, 26.0f,
4691 27.0f, 28.0f,
4692 29.0f, 30.0f,
4693 31.0f, 32.0f
4694 }));
4695 return result;
4696}
4697
4698LayerTestResult<float, 4> Concatenation4dDim0Test(
4699 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004700 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004701{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004702 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004703}
4704
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004705template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004706LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
4707 armnn::IWorkloadFactory& workloadFactory,
4708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4709 float qScale,
4710 int32_t qOffset)
4711{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004712 armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004713
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004714 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4715 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
4716
narpra015cdda352018-11-19 15:30:27 +00004717 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4718 1.0f, 2.0f,
4719 3.0f, 4.0f,
4720 5.0f, 6.0f,
4721 7.0f, 8.0f,
4722 9.0f, 10.0f,
4723 11.0f, 12.0f,
4724
4725 11.0f, 12.0f,
4726 13.0f, 14.0f,
4727 15.0f, 16.0f,
4728 17.0f, 18.0f,
4729 19.0f, 20.0f,
4730 21.0f, 22.0f,
4731
4732 21.0f, 22.0f,
4733 23.0f, 24.0f,
4734 25.0f, 26.0f,
4735 27.0f, 28.0f,
4736 29.0f, 30.0f,
4737 31.0f, 32.0f
4738 }));
4739
4740 return result;
4741}
4742
4743LayerTestResult<float, 4> Concatenation4dDim1Test(
4744 armnn::IWorkloadFactory& workloadFactory,
4745 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4746{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004747 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004748}
4749
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004750template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004751LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
4752 armnn::IWorkloadFactory& workloadFactory,
4753 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4754 float qScale,
4755 int32_t qOffset)
4756{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004757 armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004758
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004759 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4760 workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
4761
narpra015cdda352018-11-19 15:30:27 +00004762 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4763 1.0f, 2.0f,
4764 3.0f, 4.0f,
4765 11.0f, 12.0f,
4766 13.0f, 14.0f,
4767 21.0f, 22.0f,
4768 23.0f, 24.0f,
4769
4770 5.0f, 6.0f,
4771 7.0f, 8.0f,
4772 15.0f, 16.0f,
4773 17.0f, 18.0f,
4774 25.0f, 26.0f,
4775 27.0f, 28.0f,
4776
4777 9.0f, 10.0f,
4778 11.0f, 12.0f,
4779 19.0f, 20.0f,
4780 21.0f, 22.0f,
4781 29.0f, 30.0f,
4782 31.0f, 32.0f
4783 }));
4784
4785 return result;
4786}
4787
4788LayerTestResult<float, 4> Concatenation4dDim2Test(
4789 armnn::IWorkloadFactory& workloadFactory,
4790 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4791{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004792 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004793}
4794
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004795template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004796LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
4797 armnn::IWorkloadFactory& workloadFactory,
4798 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4799 float qScale,
4800 int32_t qOffset,
4801 bool useSubtensor)
4802{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004803 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004804
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004805 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4806 workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
4807
narpra015cdda352018-11-19 15:30:27 +00004808 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4809 1.0f, 2.0f,
4810 11.0f, 12.0f,
4811 21.0f, 22.0f,
4812 3.0f, 4.0f,
4813 13.0f, 14.0f,
4814 23.0f, 24.0f,
4815
4816 5.0f, 6.0f,
4817 15.0f, 16.0f,
4818 25.0f, 26.0f,
4819 7.0f, 8.0f,
4820 17.0f, 18.0f,
4821 27.0f, 28.0f,
4822
4823 9.0f, 10.0f,
4824 19.0f, 20.0f,
4825 29.0f, 30.0f,
4826 11.0f, 12.0f,
4827 21.0f, 22.0f,
4828 31.0f, 32.0f
4829 }));
4830
4831 return result;
4832}
4833
4834LayerTestResult<float, 4> Concatenation4dDim3Test(
4835 armnn::IWorkloadFactory& workloadFactory,
4836 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4837 bool useSubtensor)
4838{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004839 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
4840 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00004841}
4842
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004843template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004844LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
4845 armnn::IWorkloadFactory& workloadFactory,
4846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4847 float qScale,
4848 int32_t qOffset)
4849{
4850 unsigned int dimension = 0;
Jim Flynncbb66aa2019-05-15 13:03:54 +01004851 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004852
4853 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4854 1.0f, 2.0f,
4855 3.0f, 4.0f,
4856 5.0f, 6.0f,
4857 7.0f, 8.0f,
4858 9.0f, 10.0f,
4859 11.0f, 12.0f
4860 }));
4861
Jim Flynncbb66aa2019-05-15 13:03:54 +01004862 armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004863
4864 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4865 11.0f, 12.0f,
4866 13.0f, 14.0f,
4867 15.0f, 16.0f,
4868 17.0f, 18.0f,
4869 19.0f, 20.0f,
4870 21.0f, 22.0f,
4871
4872 21.0f, 22.0f,
4873 23.0f, 24.0f,
4874 25.0f, 26.0f,
4875 27.0f, 28.0f,
4876 29.0f, 30.0f,
4877 31.0f, 32.0f
4878
4879 }));
4880
Jim Flynncbb66aa2019-05-15 13:03:54 +01004881 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004882
4883 LayerTestResult<T, 4> result(outputTensorInfo);
4884
4885 std::vector<T> output;
4886 output.resize(outputTensorInfo.GetNumElements());
4887 Concatenate<T>(workloadFactory,
4888 memoryManager,
4889 {inputTensorInfo0, inputTensorInfo1},
4890 {input0.data(), input1.data()},
4891 outputTensorInfo,
4892 output.data(),
4893 dimension,
4894 true);
4895
4896 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4897 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4898 1.0f, 2.0f,
4899 3.0f, 4.0f,
4900 5.0f, 6.0f,
4901 7.0f, 8.0f,
4902 9.0f, 10.0f,
4903 11.0f, 12.0f,
4904
4905 11.0f, 12.0f,
4906 13.0f, 14.0f,
4907 15.0f, 16.0f,
4908 17.0f, 18.0f,
4909 19.0f, 20.0f,
4910 21.0f, 22.0f,
4911
4912 21.0f, 22.0f,
4913 23.0f, 24.0f,
4914 25.0f, 26.0f,
4915 27.0f, 28.0f,
4916 29.0f, 30.0f,
4917 31.0f, 32.0f
4918 }));
4919
4920 return result;
4921}
4922
4923LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
4924 armnn::IWorkloadFactory& workloadFactory,
4925 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4926{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004927 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
4928 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004929}
4930
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004931template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004932LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
4933 armnn::IWorkloadFactory& workloadFactory,
4934 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4935 float qScale,
4936 int32_t qOffset)
4937{
4938 unsigned int dimension = 1;
Jim Flynncbb66aa2019-05-15 13:03:54 +01004939 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004940
4941 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4942 1.0f, 2.0f,
4943 3.0f, 4.0f,
4944 5.0f, 6.0f,
4945 7.0f, 8.0f,
4946 9.0f, 10.0f,
4947 11.0f, 12.0f
4948 }));
4949
Jim Flynncbb66aa2019-05-15 13:03:54 +01004950 armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004951
4952 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4953 11.0f, 12.0f,
4954 13.0f, 14.0f,
4955 15.0f, 16.0f,
4956 17.0f, 18.0f,
4957
4958 }));
4959
Jim Flynncbb66aa2019-05-15 13:03:54 +01004960 armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004961
4962 LayerTestResult<T, 4> result(outputTensorInfo);
4963
4964 std::vector<T> output;
4965 output.resize(outputTensorInfo.GetNumElements());
4966 Concatenate<T>(workloadFactory,
4967 memoryManager,
4968 {inputTensorInfo0, inputTensorInfo1},
4969 {input0.data(), input1.data()},
4970 outputTensorInfo,
4971 output.data(),
4972 dimension,
4973 true);
4974
4975 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4976 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4977 1.0f, 2.0f,
4978 3.0f, 4.0f,
4979 5.0f, 6.0f,
4980 7.0f, 8.0f,
4981 9.0f, 10.0f,
4982 11.0f, 12.0f,
4983 11.0f, 12.0f,
4984 13.0f, 14.0f,
4985 15.0f, 16.0f,
4986 17.0f, 18.0f
4987 }));
4988
4989 return result;
4990}
4991
4992LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
4993 armnn::IWorkloadFactory& workloadFactory,
4994 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4995{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004996 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
4997 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004998}
4999
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005000template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005001LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
5002 armnn::IWorkloadFactory& workloadFactory,
5003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5004 float qScale,
5005 int32_t qOffset)
5006{
5007 unsigned int dimension = 2;
Jim Flynncbb66aa2019-05-15 13:03:54 +01005008 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005009
5010 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5011 1.0f, 2.0f,
5012 3.0f, 4.0f,
5013 5.0f, 6.0f,
5014 7.0f, 8.0f,
5015 9.0f, 10.0f,
5016 11.0f, 12.0f
5017 }));
5018
Jim Flynncbb66aa2019-05-15 13:03:54 +01005019 armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005020
5021 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5022 11.0f, 12.0f,
5023 13.0f, 14.0f,
5024 15.0f, 16.0f,
5025 17.0f, 18.0f,
5026 19.0f, 20.0f,
5027 21.0f, 22.0f,
5028 23.0f, 24.0f,
5029 25.0f, 26.0f,
5030 27.0f, 28.0f
5031 }));
5032
Jim Flynncbb66aa2019-05-15 13:03:54 +01005033 armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005034
5035 LayerTestResult<T, 4> result(outputTensorInfo);
5036
5037 std::vector<T> output;
5038 output.resize(outputTensorInfo.GetNumElements());
5039 Concatenate<T>(workloadFactory,
5040 memoryManager,
5041 {inputTensorInfo0, inputTensorInfo1},
5042 {input0.data(), input1.data()},
5043 outputTensorInfo,
5044 output.data(),
5045 dimension,
5046 true);
5047
5048 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5049 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5050 1.0f, 2.0f,
5051 3.0f, 4.0f,
5052 11.0f, 12.0f,
5053 13.0f, 14.0f,
5054 15.0f, 16.0f,
5055
5056 5.0f, 6.0f,
5057 7.0f, 8.0f,
5058 17.0f, 18.0f,
5059 19.0f, 20.0f,
5060 21.0f, 22.0f,
5061
5062 9.0f, 10.0f,
5063 11.0f, 12.0f,
5064 23.0f, 24.0f,
5065 25.0f, 26.0f,
5066 27.0f, 28.0f
5067 }));
5068
5069 return result;
5070}
5071
5072LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
5073 armnn::IWorkloadFactory& workloadFactory,
5074 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5075{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005076 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
5077 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005078}
5079
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005080template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005081LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
5082 armnn::IWorkloadFactory& workloadFactory,
5083 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5084 float qScale,
5085 int32_t qOffset,
5086 bool useSubtensor)
5087{
5088 unsigned int dimension = 3;
Jim Flynncbb66aa2019-05-15 13:03:54 +01005089 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005090
5091 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5092 1.0f, 2.0f,
5093 3.0f, 4.0f,
5094 5.0f, 6.0f,
5095 7.0f, 8.0f,
5096 9.0f, 10.0f,
5097 11.0f, 12.0f
5098 }));
5099
Jim Flynncbb66aa2019-05-15 13:03:54 +01005100 armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005101
5102 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5103 11.0f, 12.0f, 13.0f,
5104 14.0f, 15.0f, 16.0f,
5105
5106 17.0f, 18.0f, 19.0f,
5107 20.0f, 21.0f, 22.0f,
5108
5109 23.0f, 24.0f, 25.0f,
5110 26.0f, 27.0f, 28.0f
5111 }));
5112
Jim Flynncbb66aa2019-05-15 13:03:54 +01005113 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005114
5115 LayerTestResult<T, 4> result(outputTensorInfo);
5116
5117 std::vector<T> output;
5118 output.resize(outputTensorInfo.GetNumElements());
5119 Concatenate<T>(workloadFactory,
5120 memoryManager,
5121 {inputTensorInfo0, inputTensorInfo1},
5122 {input0.data(), input1.data()},
5123 outputTensorInfo,
5124 output.data(),
5125 dimension,
5126 useSubtensor);
5127
5128 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5129 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5130 1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
5131 3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
5132 5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
5133 7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
5134 9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
5135 11.0f, 12.0f, 26.0f, 27.0f, 28.0f
5136 }));
5137
5138 return result;
5139}
5140
5141LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
5142 armnn::IWorkloadFactory& workloadFactory,
5143 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5144 bool useSubtensor)
5145{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005146 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
5147 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005148}
5149
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005150LayerTestResult<float, 4> ResizeBilinearNopTest(
5151 armnn::IWorkloadFactory& workloadFactory,
5152 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005153 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005154{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005155 const armnn::TensorInfo inputTensorInfo =
5156 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
5157
5158 const armnn::TensorInfo outputTensorInfo =
5159 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005160
James Conroy6b965822018-11-01 11:33:09 +00005161 std::vector<float> inputData({
5162 1.0f, 2.0f, 3.0f, 4.0f,
5163 2.0f, 3.0f, 4.0f, 5.0f,
5164 3.0f, 4.0f, 5.0f, 6.0f,
5165 4.0f, 5.0f, 6.0f, 7.0f,
5166
telsoa014fcda012018-03-09 14:13:49 +00005167 1.0f, 2.0f, 3.0f, 4.0f,
5168 2.0f, 3.0f, 4.0f, 5.0f,
5169 3.0f, 4.0f, 5.0f, 6.0f,
5170 4.0f, 5.0f, 6.0f, 7.0f
James Conroy6b965822018-11-01 11:33:09 +00005171 });
5172
5173 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005174 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005175 {
5176 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005177 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005178 inputData = tmp;
5179 }
5180
5181 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005182
5183 LayerTestResult<float, 4> result(outputTensorInfo);
5184 result.outputExpected = input;
5185
5186 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5187 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5188
5189 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005190 descriptor.m_Parameters.m_DataLayout = dataLayout;
5191 armnn::WorkloadInfo info;
5192 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5193 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5194
5195 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5196
5197 inputHandle->Allocate();
5198 outputHandle->Allocate();
5199 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5200
Derek Lambertif30f7d32019-04-09 10:25:02 +01005201 workload->PostAllocationConfigure();
James Conroy074f3712018-10-03 09:32:03 +01005202 workload->Execute();
5203
5204 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5205 return result;
5206}
5207
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005208LayerTestResult<float, 4> SimpleResizeBilinearTest(
5209 armnn::IWorkloadFactory& workloadFactory,
5210 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005211 const armnn::DataLayout dataLayout)
James Conroy074f3712018-10-03 09:32:03 +01005212{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005213 const armnn::TensorInfo inputTensorInfo =
5214 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
5215
5216 const armnn::TensorInfo outputTensorInfo =
5217 armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);
James Conroy074f3712018-10-03 09:32:03 +01005218
James Conroy6b965822018-11-01 11:33:09 +00005219 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005220 1.0f, 255.0f,
James Conroy6b965822018-11-01 11:33:09 +00005221 200.0f, 250.0f,
5222
5223 250.0f, 200.0f,
5224 250.0f, 1.0f
5225 });
James Conroy074f3712018-10-03 09:32:03 +01005226
5227 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
5228 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
James Conroy6b965822018-11-01 11:33:09 +00005229 // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
5230 // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
5231 // which we would expect if projecting the centre).
5232
5233 std::vector<float> outputData({
5234 1.0f,
5235
5236 250.0f
5237 });
5238
5239 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005240 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005241 {
5242 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005243 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005244 inputData = tmp;
5245
5246 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005247 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005248 outputData = tmp1;
5249 }
5250
5251 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
5252
James Conroy074f3712018-10-03 09:32:03 +01005253 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005254 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
James Conroy074f3712018-10-03 09:32:03 +01005255
5256 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5257 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5258
5259 armnn::ResizeBilinearQueueDescriptor descriptor;
5260 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005261 armnn::WorkloadInfo info;
5262 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5263 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5264
5265 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5266
5267 inputHandle->Allocate();
5268 outputHandle->Allocate();
5269 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5270
Derek Lambertif30f7d32019-04-09 10:25:02 +01005271 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005272 workload->Execute();
5273
5274 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5275 return result;
5276}
5277
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005278LayerTestResult<float, 4> ResizeBilinearSqMinTest(
5279 armnn::IWorkloadFactory& workloadFactory,
5280 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005281 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005282{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005283 const armnn::TensorInfo inputTensorInfo =
5284 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
5285
5286 const armnn::TensorInfo outputTensorInfo =
5287 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005288
James Conroy6b965822018-11-01 11:33:09 +00005289 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005290 1.0f, 2.0f, 3.0f, 4.0f,
5291 2.0f, 3.0f, 4.0f, 5.0f,
5292 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00005293 4.0f, 5.0f, 6.0f, 7.0f,
5294
5295 7.0f, 6.0f, 5.0f, 4.0f,
5296 6.0f, 5.0f, 4.0f, 3.0f,
5297 5.0f, 4.0f, 3.0f, 2.0f,
5298 4.0f, 3.0f, 2.0f, 1.0f
5299 });
5300
5301 std::vector<float> outputData({
5302 1.0f, 3.0f,
5303 3.0f, 5.0f,
5304
5305 7.0f, 5.0f,
5306 5.0f, 3.0f
5307 });
5308
5309 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005310 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005311 {
5312 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005313 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005314 inputData = tmp;
5315
5316 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005317 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005318 outputData = tmp1;
5319 }
5320
5321 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005322
telsoa014fcda012018-03-09 14:13:49 +00005323 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005324 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005325
5326 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5327 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5328
5329 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005330 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005331 armnn::WorkloadInfo info;
5332 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5333 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5334
5335 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5336
5337 inputHandle->Allocate();
5338 outputHandle->Allocate();
5339 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5340
Derek Lambertif30f7d32019-04-09 10:25:02 +01005341 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005342 workload->Execute();
5343
5344 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5345 return result;
5346}
5347
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005348LayerTestResult<float, 4> ResizeBilinearMinTest(
5349 armnn::IWorkloadFactory& workloadFactory,
5350 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005351 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005352{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005353 const armnn::TensorInfo inputTensorInfo =
5354 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
5355
5356 const armnn::TensorInfo outputTensorInfo =
5357 armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005358
James Conroy6b965822018-11-01 11:33:09 +00005359 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005360 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
5361 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00005362 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
5363
5364 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
5365 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
5366 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
5367 });
5368
5369 std::vector<float> outputData({
5370 1.0f, 2.6666f, 6.00f,
5371 78.5f, 179.3333f, 401.00f,
5372
5373 987.0f, 454.6670f, 203.33f,
5374 48.5f, 22.3333f, 10.00f
5375 });
5376
5377 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005378 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005379 {
5380 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005381 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005382 inputData = tmp;
5383
5384 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005385 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005386 outputData = tmp1;
5387 }
5388
5389 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005390
5391 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005392 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005393
5394 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5395 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5396
5397 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005398 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005399 armnn::WorkloadInfo info;
5400 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5401 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5402
5403 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5404
5405 inputHandle->Allocate();
5406 outputHandle->Allocate();
5407 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5408
Derek Lambertif30f7d32019-04-09 10:25:02 +01005409 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005410 workload->Execute();
5411
5412 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5413 return result;
5414}
5415
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005416LayerTestResult<float, 4> ResizeBilinearMagTest(
5417 armnn::IWorkloadFactory& workloadFactory,
5418 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005419 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005420{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005421 const armnn::TensorInfo inputTensorInfo =
5422 armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);
5423
5424 const armnn::TensorInfo outputTensorInfo =
5425 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005426
James Conroy6b965822018-11-01 11:33:09 +00005427 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005428 1.0f, 2.0f,
5429 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00005430 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00005431
James Conroy6b965822018-11-01 11:33:09 +00005432 233.0f, 144.0f,
5433 21.0f, 13.0f,
5434 2.0f, 1.0f
5435 });
5436
5437 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01005438 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
5439 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00005440 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
5441
5442 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
5443 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
5444 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
5445 });
5446
5447 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005448 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005449 {
5450 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005451 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005452 inputData = tmp;
5453
5454 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005455 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005456 outputData = tmp1;
5457 }
5458
5459 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
5460
5461 LayerTestResult<float, 4> result(outputTensorInfo);
5462 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005463
5464 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5465 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5466
5467 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005468 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005469 armnn::WorkloadInfo info;
5470 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5471 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5472
5473 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5474
5475 inputHandle->Allocate();
5476 outputHandle->Allocate();
5477 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5478
Derek Lambertif30f7d32019-04-09 10:25:02 +01005479 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005480 workload->Execute();
5481
5482 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5483 return result;
5484}
5485
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005486LayerTestResult<float, 2> FakeQuantizationTest(
5487 armnn::IWorkloadFactory& workloadFactory,
5488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005489{
5490 constexpr unsigned int width = 2;
5491 constexpr unsigned int height = 3;
5492
5493 const armnn::TensorInfo tensorInfo({height, width },
5494 armnn::DataType::Float32);
5495 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5496 -10.0f, -5.0f,
5497 0.0f, 5.0f,
5498 10.0f, 10.0f
5499 }));
5500
5501 LayerTestResult<float, 2> ret(tensorInfo);
5502
5503 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5504
5505 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5506
5507 armnn::FakeQuantizationQueueDescriptor data;
5508 armnn::WorkloadInfo info;
5509
5510 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
5511 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
5512 float min = -10.f;
5513 float max = 10.f;
5514
5515 data.m_Parameters.m_Min = min;
5516 data.m_Parameters.m_Max = max;
5517
5518 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
5519 armnn::FakeQuantizationQueueDescriptor refData = data;
5520 armnn::WorkloadInfo refInfo = info;
5521 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
5522
5523 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
5524
5525 inputHandle->Allocate();
5526 outputHandle->Allocate();
5527
5528 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
5529
Derek Lambertif30f7d32019-04-09 10:25:02 +01005530 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005531 workload->Execute();
5532
5533 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
5534
5535 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5536 0.0f, 63.0f,
5537 128.0f, 191.0f,
5538 255.0f, 255.0f
5539 }));
5540 return ret;
5541}
5542
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005543namespace
5544{
5545
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005546LayerTestResult<float, 4> L2NormalizationTestImpl(
5547 armnn::IWorkloadFactory& workloadFactory,
5548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5549 const armnn::TensorShape& inputOutputTensorShape,
5550 const std::vector<float>& inputValues,
5551 const std::vector<float>& expectedOutputValues,
Matthew Bentham8800c002018-11-19 13:19:28 +00005552 const armnn::DataLayout layout)
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005553{
5554 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5555 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5556
jimfly013aab7c32018-11-12 13:32:08 +00005557 // at this point if we require it permute the input data
5558 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
5559 std::vector<float> inputData = inputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005560 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005561 {
5562 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005563 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005564 inputData = tmp;
5565 }
5566
5567 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005568
5569 LayerTestResult<float, 4> result(outputTensorInfo);
jimfly013aab7c32018-11-12 13:32:08 +00005570 std::vector<float> expectedOutputData = expectedOutputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005571 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005572 {
5573 std::vector<float> tmp(expectedOutputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005574 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
5575 expectedOutputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005576 expectedOutputData = tmp;
5577 }
5578 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005579
5580 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5581 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5582
5583 armnn::L2NormalizationQueueDescriptor descriptor;
Matthew Bentham8800c002018-11-19 13:19:28 +00005584 descriptor.m_Parameters.m_DataLayout = layout;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005585 armnn::WorkloadInfo info;
5586
5587 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5588 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5589
5590 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
5591
5592 inputHandle->Allocate();
5593 outputHandle->Allocate();
5594
5595 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
5596
Derek Lambertif30f7d32019-04-09 10:25:02 +01005597 workload->PostAllocationConfigure();
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005598 ExecuteWorkload(*workload, memoryManager);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005599
5600 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5601
5602 return result;
5603}
5604
5605float CalcInvL2Norm(std::initializer_list<float> elements)
5606{
5607 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
5608 [](float acc, float element) { return acc + element * element; });
5609 return 1.0f / sqrtf(reduction);
5610}
5611
5612} // anonymous namespace
5613
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005614template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005615LayerTestResult<T, 2> Pad2dTestCommon(
5616 armnn::IWorkloadFactory& workloadFactory,
5617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5618 float qScale,
5619 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005620{
Derek Lambertif30f7d32019-04-09 10:25:02 +01005621 const armnn::TensorShape inputShape{ 3, 3 };
5622 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005623
Derek Lambertif30f7d32019-04-09 10:25:02 +01005624 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5625 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005626
Derek Lambertif30f7d32019-04-09 10:25:02 +01005627 std::vector<T> inputValues(
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005628 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005629 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005630 // Height (3) x Width (3)
5631 4, 8, 6,
5632 7, 4, 4,
5633 3, 2, 4
5634 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005635
Derek Lambertif30f7d32019-04-09 10:25:02 +01005636 std::vector<T> expectedOutputValues(
5637 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005638 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005639 0, 0, 0, 0, 0, 0, 0,
5640 0, 0, 0, 0, 0, 0, 0,
5641 0, 0, 4, 8, 6, 0, 0,
5642 0, 0, 7, 4, 4, 0, 0,
5643 0, 0, 3, 2, 4, 0, 0,
5644 0, 0, 0, 0, 0, 0, 0,
5645 0, 0, 0, 0, 0, 0, 0
5646 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005647
Derek Lambertif30f7d32019-04-09 10:25:02 +01005648 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005649
Derek Lambertif30f7d32019-04-09 10:25:02 +01005650 LayerTestResult<T, 2> result(outputTensorInfo);
5651 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005652
Derek Lambertif30f7d32019-04-09 10:25:02 +01005653 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5654 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005655
Derek Lambertif30f7d32019-04-09 10:25:02 +01005656 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005657
Derek Lambertif30f7d32019-04-09 10:25:02 +01005658 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5659 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5660 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005661
Derek Lambertif30f7d32019-04-09 10:25:02 +01005662 descriptor.m_Parameters.m_PadList = PadList;
5663 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005664
Derek Lambertif30f7d32019-04-09 10:25:02 +01005665 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5666 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005667
Derek Lambertif30f7d32019-04-09 10:25:02 +01005668 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005669
Derek Lambertif30f7d32019-04-09 10:25:02 +01005670 inputHandle->Allocate();
5671 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005672
Derek Lambertif30f7d32019-04-09 10:25:02 +01005673 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005674
Derek Lambertif30f7d32019-04-09 10:25:02 +01005675 workload->PostAllocationConfigure();
5676 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005677
Derek Lambertif30f7d32019-04-09 10:25:02 +01005678 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005679
Derek Lambertif30f7d32019-04-09 10:25:02 +01005680 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005681}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005682
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005683template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005684LayerTestResult<T, 3> Pad3dTestCommon(
5685 armnn::IWorkloadFactory& workloadFactory,
5686 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5687 float qScale,
5688 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005689{
5690 const armnn::TensorShape inputShape{ 2, 2, 2 };
5691 const armnn::TensorShape outputShape{ 3, 5, 6 };
5692
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005693 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5694 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005695
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005696 std::vector<T> inputValues(
5697 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005698 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005699 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005700 0, 4,
5701 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005702
5703 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005704 6, 1,
5705 5, 2
5706 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005707
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005708 std::vector<T> expectedOutputValues(
5709 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005710 {
5711
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005712 0, 0, 0, 0, 0, 0,
5713 0, 0, 0, 0, 0, 0,
5714 0, 0, 0, 4, 0, 0,
5715 0, 0, 2, 5, 0, 0,
5716 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005717
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005718 0, 0, 0, 0, 0, 0,
5719 0, 0, 0, 0, 0, 0,
5720 0, 0, 6, 1, 0, 0,
5721 0, 0, 5, 2, 0, 0,
5722 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005723
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005724 0, 0, 0, 0, 0, 0,
5725 0, 0, 0, 0, 0, 0,
5726 0, 0, 0, 0, 0, 0,
5727 0, 0, 0, 0, 0, 0,
5728 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005729
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005730 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005731
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005732 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005733
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005734 LayerTestResult<T, 3> result(outputTensorInfo);
5735 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005736
5737 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5738 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5739
5740 armnn::PadQueueDescriptor descriptor;
5741
5742 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5743 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
5744 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5745 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5746
5747 descriptor.m_Parameters.m_PadList = PadList;
5748 armnn::WorkloadInfo info;
5749
5750 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5751 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5752
5753 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5754
5755 inputHandle->Allocate();
5756 outputHandle->Allocate();
5757
5758 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
5759
Derek Lambertif30f7d32019-04-09 10:25:02 +01005760 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005761 workload->Execute();
5762
5763 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
5764
5765 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005766}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005767
// Zero-pads a 2x2x3x2 (NCHW) tensor to 4x5x7x4 and checks the workload
// output against the expected layout. Pad amounts (before, after) per
// dimension: batch (1,1), channel (2,1), height (3,1), width (1,1).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Pad4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);

    // Values 0..23 quantized with the caller-supplied scale/offset.
    std::vector<T> inputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        0, 1,
        2, 3,
        4, 5,

        // Batch 0, Channel 1, Height (3) x Width (2)
        6, 7,
        8, 9,
        10, 11,

        // Batch 1, Channel 0, Height (3) x Width (2)
        12, 13,
        14, 15,
        16, 17,

        // Batch 1, Channel 1, Height (3) x Width (2)
        18, 19,
        20, 21,
        22, 23
    }));

    // 4 batches x 5 channels x 7 rows x 4 columns. With the pads above, the
    // original data lands in batches 1-2, channels 2-3, rows 3-5, columns 1-2;
    // everything else is zero.
    std::vector<T> expectedOutputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Batch 0 (leading batch padding): channels 0-4 all zero.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 1: channels 0-1 are leading channel padding.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 1, channel 2: original batch 0 / channel 0 data.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 1, 0,
        0, 2, 3, 0,
        0, 4, 5, 0,
        0, 0, 0, 0,

        // Batch 1, channel 3: original batch 0 / channel 1 data.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 6, 7, 0,
        0, 8, 9, 0,
        0, 10, 11, 0,
        0, 0, 0, 0,

        // Batch 1, channel 4: trailing channel padding.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 2: channels 0-1 are leading channel padding.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 2, channel 2: original batch 1 / channel 0 data.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 12, 13, 0,
        0, 14, 15, 0,
        0, 16, 17, 0,
        0, 0, 0, 0,

        // Batch 2, channel 3: original batch 1 / channel 1 data.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 18, 19, 0,
        0, 20, 21, 0,
        0, 22, 23, 0,
        0, 0, 0, 0,

        // Batch 2, channel 4: trailing channel padding.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 3 (trailing batch padding): channels 0-4 all zero.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0
    }));

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (before, after) padding per dimension: batch, channel, height, width.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
6005
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006006LayerTestResult<uint8_t, 2> PadUint82dTest(
6007 armnn::IWorkloadFactory& workloadFactory,
6008 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006009{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006010 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006011}
6012
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006013LayerTestResult<uint8_t, 3> PadUint83dTest(
6014 armnn::IWorkloadFactory& workloadFactory,
6015 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006016{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006017 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006018}
6019
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006020LayerTestResult<uint8_t, 4> PadUint84dTest(
6021 armnn::IWorkloadFactory& workloadFactory,
6022 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006023{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006024 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006025}
6026
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006027LayerTestResult<float, 2> PadFloat322dTest(
6028 armnn::IWorkloadFactory& workloadFactory,
6029 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006030{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006031 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006032}
6033
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006034LayerTestResult<float, 3> PadFloat323dTest(
6035 armnn::IWorkloadFactory& workloadFactory,
6036 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006037{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006038 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006039}
6040
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006041LayerTestResult<float, 4> PadFloat324dTest(
6042 armnn::IWorkloadFactory& workloadFactory,
6043 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006044{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006045 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006046}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006047
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006048LayerTestResult<float, 4> L2Normalization1dTest(
6049 armnn::IWorkloadFactory& workloadFactory,
6050 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006051 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006052{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006053 // Width: 1
6054 // Height: 1
6055 // Channels: 10
6056 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006057 unsigned int numberOfBatches = 1;
6058 unsigned int numberOfChannels = 10;
6059 unsigned int height = 1;
6060 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00006061
jimfly013aab7c32018-11-12 13:32:08 +00006062
Nina Drozdd41b2592018-11-19 13:03:36 +00006063 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006064 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006065 std::vector<float> inputValues
6066 {
6067 // Batch 0, Channel 0, Height (1) x Width (1)
6068 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00006069
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006070 // Batch 0, Channel 1, Height (1) x Width (1)
6071 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00006072
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006073 // Batch 0, Channel 2, Height (1) x Width (1)
6074 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00006075
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006076 // Batch 0, Channel 3, Height (1) x Width (1)
6077 4.0f,
6078
6079 // Batch 0, Channel 4, Height (1) x Width (1)
6080 5.0f,
6081
6082 // Batch 0, Channel 5, Height (1) x Width (1)
6083 6.0f,
6084
6085 // Batch 0, Channel 6, Height (1) x Width (1)
6086 7.0f,
6087
6088 // Batch 0, Channel 7, Height (1) x Width (1)
6089 8.0f,
6090
6091 // Batch 0, Channel 8, Height (1) x Width (1)
6092 9.0f,
6093
6094 // Batch 0, Channel 9, Height (1) x Width (1)
6095 10.0f
6096 };
telsoa014fcda012018-03-09 14:13:49 +00006097 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006098 std::vector<float> expectedOutputValues
6099 {
6100 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00006101 1.0f * approxInvL2Norm,
6102 2.0f * approxInvL2Norm,
6103 3.0f * approxInvL2Norm,
6104 4.0f * approxInvL2Norm,
6105 5.0f * approxInvL2Norm,
6106 6.0f * approxInvL2Norm,
6107 7.0f * approxInvL2Norm,
6108 8.0f * approxInvL2Norm,
6109 9.0f * approxInvL2Norm,
6110 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006111 };
telsoa014fcda012018-03-09 14:13:49 +00006112
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006113
6114 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006115 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006116}
6117
// L2 normalization of a 1 (N) x 2 (C) x 1 (H) x 5 (W) tensor.
// Normalization is across the channel dimension: each output element is the
// input element multiplied by the inverse L2 norm of the two channel values at
// the same spatial position (see CalcInvL2Norm pairs below).
LayerTestResult<float, 4> L2Normalization2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    // Width: 5
    // Height: 1
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 1;
    unsigned int width = 5;

    // Shape is permuted to NCHW or NHWC depending on the requested layout.
    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f, 3.0f, 5.0f, 7.0f, 9.0f,

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
    };
    // Each pair passed to CalcInvL2Norm holds the channel-0 and channel-1
    // values at one spatial position.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
                                   inputValues, expectedOutputValues, layout);
}
telsoa014fcda012018-03-09 14:13:49 +00006162
// L2 normalization of a 1 (N) x 2 (C) x 4 (H) x 3 (W) tensor.
// Normalization is across the channel dimension: each output element is the
// input element multiplied by the inverse L2 norm of the two channel values at
// the same (height, width) position.
LayerTestResult<float, 4> L2Normalization3dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 4;
    unsigned int width = 3;

    // Shape is permuted to NCHW or NHWC depending on the requested layout.
    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f,  21.0f, 150.0f,
        149.0f,  32.0f, 179.0f,
         15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f,  73.0f,
        211.0f, 212.0f,  89.0f,
         24.0f, 138.0f, 188.0f,
        162.0f,  12.0f, 161.0f
    };
    // Each pair passed to CalcInvL2Norm holds the channel-0 and channel-1
    // values at one spatial position.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
         21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
         32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
         73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
         89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
         12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
                                   inputValues, expectedOutputValues, layout);
}
telsoa014fcda012018-03-09 14:13:49 +00006227
// L2 normalization of a 2 (N) x 3 (C) x 4 (H) x 3 (W) tensor.
// Normalization is across the channel dimension: each output element is the
// input element multiplied by the inverse L2 norm of the three channel values
// at the same (batch, height, width) position.
LayerTestResult<float, 4> L2Normalization4dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2
    unsigned int numberOfBatches = 2;
    unsigned int numberOfChannels = 3;
    unsigned int height = 4;
    unsigned int width = 3;

    // Shape is permuted to NCHW or NHWC depending on the requested layout.
    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f
    };
    // Each triple passed to CalcInvL2Norm holds the channel-0/1/2 values at
    // one (batch, height, width) position.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
         74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
         77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
         82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
        170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
         89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
        254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
         12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
         90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
         49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
          7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
         18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
         25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
         59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
         17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
         75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
         16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
                                   inputValues, expectedOutputValues, layout);
}
6372
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006373template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006374LayerTestResult<T, 4> ConstantTestImpl(
6375 armnn::IWorkloadFactory& workloadFactory,
6376 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00006377 float qScale,
6378 int32_t qOffset)
6379{
6380 constexpr unsigned int inputWidth = 3;
6381 constexpr unsigned int inputHeight = 4;
6382 constexpr unsigned int inputChannels = 3;
6383 constexpr unsigned int inputBatchSize = 2;
6384
6385 constexpr unsigned int outputWidth = inputWidth;
6386 constexpr unsigned int outputHeight = inputHeight;
6387 constexpr unsigned int outputChannels = inputChannels;
6388 constexpr unsigned int outputBatchSize = inputBatchSize;
6389
Nina Drozd58ef2c62019-05-16 12:09:18 +01006390 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6391 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00006392
Nina Drozd58ef2c62019-05-16 12:09:18 +01006393 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6394 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00006395
6396 // Set quantization parameters if the requested type is a quantized type.
6397 if(armnn::IsQuantizedType<T>())
6398 {
6399 inputTensorInfo.SetQuantizationScale(qScale);
6400 inputTensorInfo.SetQuantizationOffset(qOffset);
6401 outputTensorInfo.SetQuantizationScale(qScale);
6402 outputTensorInfo.SetQuantizationOffset(qOffset);
6403 }
6404
6405 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
6406 QuantizedVector<T>(qScale, qOffset, {
6407 // Batch 0, Channel 0
6408 235.0f, 46.0f, 178.0f,
6409 100.0f, 123.0f, 19.0f,
6410 172.0f, 74.0f, 250.0f,
6411 6.0f, 195.0f, 80.0f,
6412
6413 // Batch 0, Channel 1
6414 113.0f, 95.0f, 202.0f,
6415 77.0f, 114.0f, 71.0f,
6416 122.0f, 246.0f, 166.0f,
6417 82.0f, 28.0f, 37.0f,
6418
6419 // Batch 0, Channel 2
6420 56.0f, 170.0f, 162.0f,
6421 194.0f, 89.0f, 254.0f,
6422 12.0f, 209.0f, 200.0f,
6423 1.0f, 64.0f, 54.0f,
6424
6425 // Batch 1, Channel 0
6426 67.0f, 90.0f, 49.0f,
6427 7.0f, 163.0f, 18.0f,
6428 25.0f, 117.0f, 103.0f,
6429 247.0f, 59.0f, 189.0f,
6430
6431 // Batch 1, Channel 1
6432 239.0f, 104.0f, 199.0f,
6433 17.0f, 124.0f, 153.0f,
6434 222.0f, 217.0f, 75.0f,
6435 32.0f, 126.0f, 21.0f,
6436
6437 // Batch 1, Channel 2
6438 97.0f, 145.0f, 215.0f,
6439 115.0f, 116.0f, 238.0f,
6440 226.0f, 16.0f, 132.0f,
6441 92.0f, 125.0f, 88.0f,
6442 })));
6443
6444 LayerTestResult<T, 4> result(outputTensorInfo);
6445 result.outputExpected = input;
6446
6447 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6448
6449 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
6450 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
6451
6452 armnn::ConstantQueueDescriptor descriptor;
6453 descriptor.m_LayerOutput = &constantTensor;
6454
6455 armnn::WorkloadInfo info;
6456 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6457
6458 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
6459
6460 outputHandle->Allocate();
6461
Derek Lambertif30f7d32019-04-09 10:25:02 +01006462 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00006463 workload->Execute();
6464
6465 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6466 return result;
6467}
6468
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006469LayerTestResult<float, 4> ConstantTest(
6470 armnn::IWorkloadFactory& workloadFactory,
6471 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006472{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006473 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006474}
6475
Nina Drozd58ef2c62019-05-16 12:09:18 +01006476LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
6477 armnn::IWorkloadFactory& workloadFactory,
6478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6479{
6480 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
6481}
6482
6483LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006484 armnn::IWorkloadFactory& workloadFactory,
6485 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006486{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006487 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006488}
6489
// Merger (concatenation) of two QAsymm8 tensors along the channel dimension
// where the inputs carry DIFFERENT quantization parameters. The output shares
// input1's parameters, so the workload only has to requantize input2's data
// (visible in the 176..197 values of the expected output's last channel).
LayerTestResult<uint8_t, 3> MergerUint8DifferentQParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output is inputs concatenated along channels: 2 + 1 = 3.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Quantized input1 tensor. Range [-3, 1]
    const float inputScale1 = 0.015686f;
    const int32_t inputOffset1 = 192;

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Quatized input2 tensor. Range [-1, 4]
    const float inputScale2 = 0.019608f;
    const int32_t inputOffset2 = 50;

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // Output has the same quantization parameters than input1,
    // so that only the requantization of input2 is required
    const float outputScale = 0.015686f;
    const int32_t outputOffset = 192;

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // First two channels pass through untouched (same qparams as the output);
    // the last channel is input2 requantized from scale2/offset2 to
    // scale/offset of the output.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        176, 177, 178,
        179, 181, 182,
        183, 184, 186,
        187, 188, 189,
        191, 192, 193,
        195, 196, 197,
    })
    );

    // NOTE(review): quantization parameters are attached after the MakeTensor
    // calls above; MakeTensor only uses the shape, so the order is harmless.
    outputTensorInfo.SetQuantizationScale(outputScale);
    outputTensorInfo.SetQuantizationOffset(outputOffset);
    inputTensorInfo1.SetQuantizationScale(inputScale1);
    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
    inputTensorInfo2.SetQuantizationScale(inputScale2);
    inputTensorInfo2.SetQuantizationOffset(inputOffset2);

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // When the backend supports sub-tensors, the inputs are created as views
    // directly into the output tensor at their respective window origins.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6632
// Merger (concatenation) of two QAsymm8 tensors along the channel dimension
// where all tensors share the SAME quantization parameters, so the merger is a
// pure copy: the expected output is simply input1 followed by input2.
LayerTestResult<uint8_t, 3> MergerUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output is inputs concatenated along channels: 2 + 1 = 3.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's channel.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // When the backend supports sub-tensors, the inputs are created as views
    // directly into the output tensor at their respective window origins.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6768
// Merges (concatenates) a 2-channel and a 1-channel QuantisedSymm16 tensor
// along the channel dimension into a single 3-channel output, using one view
// origin per input. memoryManager is not used by this test.
// NOTE(review): the element type here is uint16_t although QuantisedSymm16 is
// a signed 16-bit format; every value used fits either representation -
// confirm this mismatch is intentional.
LayerTestResult<uint16_t, 3> MergerUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output is [C, H, W] = [3, 6, 3]; input1 fills channels [0, 2) and
    // input2 fills channel [2, 3).
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    // All tensors share the same quantization parameters, so the merge is a
    // pure data rearrangement.
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint16_t, 3> ret(outputTensorInfo);

    // Expected output: input1's two channels (values 1..36) followed by
    // input2's single channel (values 37..54).
    ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    }));

    auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    // First view starts at the origin of the output.
    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    // Second view starts at channel 2, immediately after input1's two channels.
    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // When the backend supports sub-tensors, each input handle is a view into
    // the output buffer at its view origin; otherwise standalone handles are
    // created and the Merger workload performs the copy.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // View origins must be registered in the same order as the inputs above.
    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
telsoa014fcda012018-03-09 14:13:49 +00006901
surmeh01bceff2f2018-03-29 16:29:27 +01006902namespace
telsoa014fcda012018-03-09 14:13:49 +00006903{
Sadik Armagan2999a022019-04-09 14:20:12 +01006904template <typename T>
6905LayerTestResult<T, 4> AdditionQuantizeTestHelper(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006906 armnn::IWorkloadFactory& workloadFactory,
6907 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6908 const unsigned int shape0[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01006909 const std::vector<T>& values0,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006910 float scale0,
6911 int32_t offset0,
6912 const unsigned int shape1[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01006913 const std::vector<T> & values1,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006914 float scale1,
6915 int32_t offset1,
6916 const unsigned int outShape[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01006917 const std::vector<T> & outValues,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006918 float outScale,
6919 int32_t outOffset)
surmeh01bceff2f2018-03-29 16:29:27 +01006920{
Sadik Armagan2999a022019-04-09 14:20:12 +01006921 auto dataType = (std::is_same<T, uint8_t>::value ?
6922 armnn::DataType::QuantisedAsymm8 :
6923 armnn::DataType::QuantisedSymm16);
6924
6925 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
6926 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
6927 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
telsoa014fcda012018-03-09 14:13:49 +00006928
surmeh01bceff2f2018-03-29 16:29:27 +01006929 inputTensorInfo0.SetQuantizationScale(scale0);
6930 inputTensorInfo0.SetQuantizationOffset(offset0);
telsoa014fcda012018-03-09 14:13:49 +00006931
surmeh01bceff2f2018-03-29 16:29:27 +01006932 inputTensorInfo1.SetQuantizationScale(scale1);
6933 inputTensorInfo1.SetQuantizationOffset(offset1);
telsoa014fcda012018-03-09 14:13:49 +00006934
surmeh01bceff2f2018-03-29 16:29:27 +01006935 outputTensorInfo.SetQuantizationScale(outScale);
6936 outputTensorInfo.SetQuantizationOffset(outOffset);
telsoa014fcda012018-03-09 14:13:49 +00006937
Sadik Armagan2999a022019-04-09 14:20:12 +01006938 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
6939 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00006940
Sadik Armagan2999a022019-04-09 14:20:12 +01006941 LayerTestResult<T, 4> result(outputTensorInfo);
6942 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
6943
6944 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
6945 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
6946 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6947
6948 armnn::AdditionQueueDescriptor data;
6949 armnn::WorkloadInfo info;
6950 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
6951 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
6952 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
6953
6954 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
6955
6956 inputHandle0->Allocate();
6957 inputHandle1->Allocate();
6958 outputHandle->Allocate();
6959
6960 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
6961 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
6962
Derek Lambertif30f7d32019-04-09 10:25:02 +01006963 workload->PostAllocationConfigure();
Sadik Armagan2999a022019-04-09 14:20:12 +01006964 workload->Execute();
6965
6966 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6967
6968 return result;
6969}
6970} // anonymous namespace
6971
6972LayerTestResult<uint8_t, 4> AdditionUint8Test(
6973 armnn::IWorkloadFactory& workloadFactory,
6974 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6975{
6976 const unsigned int shape0[] = { 1, 2, 2, 3 };
6977 const unsigned int shape1[] = { 1, 2, 2, 3 };
6978
6979 std::vector<uint8_t> input0(
6980 {
6981 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
6982 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
6983 });
6984
6985 std::vector<uint8_t> input1(
6986 {
6987 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
6988 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
6989 });
6990
6991 std::vector<uint8_t> output(
6992 {
6993 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
6994 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
6995 });
6996
6997 return AdditionQuantizeTestHelper(workloadFactory,
6998 memoryManager,
6999 shape0, input0, 7.0f, 3,
7000 shape1, input1, 7.0f, 3,
7001 shape0, output, 7.0f, 3);
7002}
7003
// Element-wise addition of two QSymm16 tensors. All tensors use scale=7,
// offset=0; dequantized values (quantized * 7) are noted to the right.
// Unlike the Uint8 variant, no output value saturates the int16 range.
// (The dequantized-value comments previously carried over the offset-3
// figures from AdditionUint8Test; corrected here for offset 0.)
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0(
    {
        63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
        203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
    });

    std::vector<int16_t> input1(
    {
        21, 7, 175, 231, 175, 210, // 147, 49, 1225, 1617, 1225, 1470
        126, 161, 63, 21, 105, 126 // 882, 1127, 441, 147, 735, 882
    });

    std::vector<int16_t> output(
    {
        84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254
        329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
    });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
7035
7036namespace
7037{
7038template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7039LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
7040 armnn::IWorkloadFactory& workloadFactory,
7041 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7042 const unsigned int shape0[4],
7043 const std::vector<T> & values0,
7044 float scale0,
7045 int32_t offset0,
7046 const unsigned int shape1[4],
7047 const std::vector<T> & values1,
7048 float scale1,
7049 int32_t offset1,
7050 const unsigned int outShape[4],
7051 const std::vector<T> & outValues,
7052 float outScale,
7053 int32_t outOffset)
7054{
7055 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
7056 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
7057 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
7058
7059 inputTensorInfo0.SetQuantizationScale(scale0);
7060 inputTensorInfo0.SetQuantizationOffset(offset0);
7061
7062 inputTensorInfo1.SetQuantizationScale(scale1);
7063 inputTensorInfo1.SetQuantizationOffset(offset1);
7064
7065 outputTensorInfo.SetQuantizationScale(outScale);
7066 outputTensorInfo.SetQuantizationOffset(outOffset);
7067
7068 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
7069 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
7070
7071 LayerTestResult<T, 4> result(outputTensorInfo);
7072 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00007073
surmeh01bceff2f2018-03-29 16:29:27 +01007074 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
telsoa014fcda012018-03-09 14:13:49 +00007075 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
telsoa014fcda012018-03-09 14:13:49 +00007076 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7077
7078 armnn::MultiplicationQueueDescriptor data;
7079 armnn::WorkloadInfo info;
surmeh01bceff2f2018-03-29 16:29:27 +01007080 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
7081 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
telsoa014fcda012018-03-09 14:13:49 +00007082 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7083
7084 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
7085
surmeh01bceff2f2018-03-29 16:29:27 +01007086 inputHandle0->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007087 inputHandle1->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007088 outputHandle->Allocate();
7089
surmeh01bceff2f2018-03-29 16:29:27 +01007090 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007091 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007092
Derek Lambertif30f7d32019-04-09 10:25:02 +01007093 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007094 workload->Execute();
7095
7096 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7097
7098 return result;
7099}
surmeh01bceff2f2018-03-29 16:29:27 +01007100} // anonymous namespace
7101
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007102LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
7103 armnn::IWorkloadFactory& workloadFactory,
7104 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007105{
7106 unsigned int batchSize = 1;
7107 unsigned int channels = 2;
7108 unsigned int height = 2;
7109 unsigned int width = 3;
7110 const unsigned int shape[] = { batchSize, channels, height, width };
7111
telsoa01c577f2c2018-08-31 09:22:23 +01007112 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007113 std::vector<uint8_t> input0({
7114 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
7115 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
7116 });
7117
telsoa01c577f2c2018-08-31 09:22:23 +01007118 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007119 std::vector<uint8_t> input1({
7120 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
7121 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
7122 });
7123
telsoa01c577f2c2018-08-31 09:22:23 +01007124 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007125 std::vector<uint8_t> output(
7126 {
7127 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
7128 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
7129 });
7130
Sadik Armagan2999a022019-04-09 14:20:12 +01007131 // Scale/offset chosen to have output values out of range.
7132 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7133 memoryManager,
7134 shape,
7135 input0,
7136 4.0f,
7137 1,
7138 shape,
7139 input1,
7140 3.0f,
7141 -2,
7142 shape,
7143 output,
7144 1366.255f,
7145 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01007146}
7147
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007148LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
7149 armnn::IWorkloadFactory& workloadFactory,
7150 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007151{
7152 const unsigned int shape0[] = { 1, 2, 2, 3 };
7153 const unsigned int shape1[] = { 1, 1, 1, 1 };
7154
7155 std::vector<uint8_t> input0({
7156 1, 2, 3, 4, 5, 6,
7157 7, 8, 9, 10, 11, 12
7158 });
7159
7160 std::vector<uint8_t> input1({2});
7161
7162 std::vector<uint8_t> output({
7163 2, 4, 6, 8, 10, 12,
7164 14, 16, 18, 20, 22, 24
7165 });
7166
Sadik Armagan2999a022019-04-09 14:20:12 +01007167 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7168 memoryManager,
7169 shape0,
7170 input0,
7171 1.0f,
7172 0,
7173 shape1,
7174 input1,
7175 1.0f,
7176 0,
7177 shape0,
7178 output,
7179 1.0f,
7180 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007181}
7182
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007183LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7184 armnn::IWorkloadFactory& workloadFactory,
7185 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007186{
7187 const unsigned int shape0[] = { 1, 2, 2, 3 };
7188 const unsigned int shape1[] = { 1, 1, 1, 3 };
7189
7190 std::vector<uint8_t> input0({
7191 1, 2, 3, 4, 5, 6,
7192 7, 8, 9, 10, 11, 12
7193 });
7194
7195 std::vector<uint8_t> input1({1, 2, 3});
7196
7197 std::vector<uint8_t> output({
7198 1, 4, 9, 4, 10, 18,
7199 7, 16, 27, 10, 22, 36
7200 });
7201
Sadik Armagan2999a022019-04-09 14:20:12 +01007202 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7203 memoryManager,
7204 shape0,
7205 input0,
7206 1.0f,
7207 0,
7208 shape1,
7209 input1,
7210 1.0f,
7211 0,
7212 shape0,
7213 output,
7214 1.0f,
7215 0);
7216}
7217
7218LayerTestResult<int16_t, 4> MultiplicationInt16Test(
7219 armnn::IWorkloadFactory& workloadFactory,
7220 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7221{
7222 const unsigned int shape[] = { 1, 2, 2, 3 };
7223
7224 std::vector<int16_t> input0(
7225 {
7226 6, 7, 8, 9, 10, 11,
7227 12, 13, 14, 15, 16, 17
7228 });
7229
7230 std::vector<int16_t> input1(
7231 {
7232 1, 2, 3, 4, 5, 6,
7233 7, 8, 9, 10, 11, 12
7234 });
7235
7236 std::vector<int16_t> output(
7237 {
7238 6, 14, 24, 36, 50, 66,
7239 84, 104, 126, 150, 176, 204
7240 });
7241
7242 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7243 memoryManager,
7244 shape,
7245 input0,
7246 1.0f,
7247 0,
7248 shape,
7249 input1,
7250 1.0f,
7251 0,
7252 shape,
7253 output,
7254 1.0f,
7255 0);
7256}
7257
7258LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
7259 armnn::IWorkloadFactory& workloadFactory,
7260 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7261{
7262 const unsigned int shape0[] = { 1, 2, 2, 3 };
7263 const unsigned int shape1[] = { 1, 1, 1, 1 };
7264
7265 std::vector<int16_t> input0(
7266 {
7267 1, 2, 3, 4, 5, 6,
7268 7, 8, 9, 10, 11, 12
7269 });
7270
7271 std::vector<int16_t> input1({2});
7272
7273 std::vector<int16_t> output(
7274 {
7275 2, 4, 6, 8, 10, 12,
7276 14, 16, 18, 20, 22, 24
7277 });
7278
7279 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7280 memoryManager,
7281 shape0,
7282 input0,
7283 1.0f,
7284 0,
7285 shape1,
7286 input1,
7287 1.0f,
7288 0,
7289 shape0,
7290 output,
7291 1.0f,
7292 0);
7293}
7294
7295LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
7296 armnn::IWorkloadFactory& workloadFactory,
7297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7298{
7299 const unsigned int shape0[] = { 1, 2, 2, 3 };
7300 const unsigned int shape1[] = { 1, 1, 1, 3 };
7301
7302 std::vector<int16_t> input0(
7303 {
7304 1, 2, 3, 4, 5, 6,
7305 7, 8, 9, 10, 11, 12
7306 });
7307
7308 std::vector<int16_t> input1({1, 2, 3});
7309
7310 std::vector<int16_t> output(
7311 {
7312 1, 4, 9, 4, 10, 18,
7313 7, 16, 27, 10, 22, 36
7314 });
7315
7316 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7317 memoryManager,
7318 shape0,
7319 input0,
7320 1.0f,
7321 0,
7322 shape1,
7323 input1,
7324 1.0f,
7325 0,
7326 shape0,
7327 output,
7328 1.0f,
7329 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007330}
telsoa014fcda012018-03-09 14:13:49 +00007331
David Beckf195f032018-09-06 16:46:34 +01007332namespace
7333{
Sadik Armagan2999a022019-04-09 14:20:12 +01007334template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007335LayerTestResult<T, 4> SubtractionTestHelper(
7336 armnn::IWorkloadFactory& workloadFactory,
7337 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7338 const unsigned int shape0[4],
7339 const std::vector<T>& values0,
7340 float scale0,
7341 int32_t offset0,
7342 const unsigned int shape1[4],
7343 const std::vector<T> & values1,
7344 float scale1,
7345 int32_t offset1,
7346 const unsigned int outShape[4],
7347 const std::vector<T> & outValues,
7348 float outScale,
7349 int32_t outOffset)
David Beckf195f032018-09-06 16:46:34 +01007350{
Sadik Armagan2999a022019-04-09 14:20:12 +01007351 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
7352 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
7353 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
David Beckf195f032018-09-06 16:46:34 +01007354
7355 inputTensorInfo0.SetQuantizationScale(scale0);
7356 inputTensorInfo0.SetQuantizationOffset(offset0);
7357
7358 inputTensorInfo1.SetQuantizationScale(scale1);
7359 inputTensorInfo1.SetQuantizationOffset(offset1);
7360
7361 outputTensorInfo.SetQuantizationScale(outScale);
7362 outputTensorInfo.SetQuantizationOffset(outOffset);
7363
7364 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
7365 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
7366
7367 LayerTestResult<T, 4> result(outputTensorInfo);
7368 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
7369
7370 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
7371 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
7372 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7373
7374 armnn::SubtractionQueueDescriptor data;
7375 armnn::WorkloadInfo info;
7376 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
7377 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7378 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7379
7380 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
7381
7382 inputHandle0->Allocate();
7383 inputHandle1->Allocate();
7384 outputHandle->Allocate();
7385
7386 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
7387 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
7388
Derek Lambertif30f7d32019-04-09 10:25:02 +01007389 workload->PostAllocationConfigure();
David Beckf195f032018-09-06 16:46:34 +01007390 workload->Execute();
7391
7392 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7393
7394 return result;
7395}
7396} // anonymous namespace
7397
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007398LayerTestResult<uint8_t, 4> SubtractionUint8Test(
7399 armnn::IWorkloadFactory& workloadFactory,
7400 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007401{
7402 const unsigned int shape0[] = { 1, 1, 2, 2 };
7403 const unsigned int shape1[] = { 1, 1, 2, 2 };
7404
7405 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7406 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
7407 std::vector<uint8_t> output({ 3, 3, 5, 5 });
7408
Sadik Armagan2999a022019-04-09 14:20:12 +01007409 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7410 memoryManager,
7411 shape0, input0, 0.5f, 2,
7412 shape1, input1, 1.0f, 0,
7413 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007414}
7415
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007416LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
7417 armnn::IWorkloadFactory& workloadFactory,
7418 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007419{
7420 const unsigned int shape0[] = { 1, 1, 2, 2 };
7421 const unsigned int shape1[] = { 1, 1, 1, 1 };
7422
7423 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7424 std::vector<uint8_t> input1({ 2 });
7425 std::vector<uint8_t> output({ 5, 6, 7, 8 });
7426
Sadik Armagan2999a022019-04-09 14:20:12 +01007427 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7428 memoryManager,
7429 shape0, input0, 0.5f, 2,
7430 shape1, input1, 1.0f, 0,
7431 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01007432}
7433
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007434LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
7435 armnn::IWorkloadFactory& workloadFactory,
7436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007437{
7438 const unsigned int shape0[] = { 1, 1, 2, 2 };
7439 const unsigned int shape1[] = { 1, 1, 2, 1 };
7440
7441 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7442 std::vector<uint8_t> input1({ 2, 1 });
7443 std::vector<uint8_t> output({ 8, 11, 12, 15 });
7444
Sadik Armagan2999a022019-04-09 14:20:12 +01007445 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7446 memoryManager,
7447 shape0, input0, 1.0f, 0,
7448 shape1, input1, 1.0f, 0,
7449 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007450}
7451
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007452LayerTestResult<float, 4> SubtractionTest(
7453 armnn::IWorkloadFactory& workloadFactory,
7454 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007455{
7456 const unsigned int shape0[] = { 1, 1, 2, 2 };
7457 const unsigned int shape1[] = { 1, 1, 2, 2 };
7458
7459 std::vector<float> input0({ 1, 2, 3, 4 });
7460 std::vector<float> input1({ 1, -1, 0, 2 });
7461 std::vector<float> output({ 0, 3, 3, 2 });
7462
Sadik Armagan2999a022019-04-09 14:20:12 +01007463 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7464 memoryManager,
7465 shape0, input0, 1.0f, 0,
7466 shape1, input1, 1.0f, 0,
7467 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007468}
7469
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007470LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
7471 armnn::IWorkloadFactory& workloadFactory,
7472 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007473{
7474 const unsigned int shape0[] = { 1, 1, 2, 2 };
7475 const unsigned int shape1[] = { 1, 1, 1, 1 };
7476
7477 std::vector<float> input0({ 1, 2, 3, 4 });
7478 std::vector<float> input1({ 10 });
7479 std::vector<float> output({ -9, -8, -7, -6 });
7480
Sadik Armagan2999a022019-04-09 14:20:12 +01007481 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7482 memoryManager,
7483 shape0, input0, 1.0f, 0,
7484 shape1, input1, 1.0f, 0,
7485 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007486}
7487
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007488LayerTestResult<float, 4> SubtractionBroadcastTest(
7489 armnn::IWorkloadFactory& workloadFactory,
7490 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007491{
7492 const unsigned int shape0[] = { 1, 1, 2, 2 };
7493 const unsigned int shape1[] = { 1, 1, 1, 2 };
7494
7495 std::vector<float> input0({ 1, 2, 3, 4 });
7496 std::vector<float> input1({ 10, -5 });
7497 std::vector<float> output({ -9, 7, -7, 9 });
7498
Sadik Armagan2999a022019-04-09 14:20:12 +01007499 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7500 memoryManager,
7501 shape0, input0, 1.0f, 0,
7502 shape1, input1, 1.0f, 0,
7503 shape0, output, 1.0f, 0);
7504}
7505
7506LayerTestResult<int16_t, 4> SubtractionInt16Test(
7507 armnn::IWorkloadFactory& workloadFactory,
7508 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7509{
7510 const unsigned int shape0[] = { 1, 1, 2, 2 };
7511 const unsigned int shape1[] = { 1, 1, 2, 2 };
7512
7513 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7514 std::vector<int16_t> input1({ 1, 2, 1, 2 });
7515 std::vector<int16_t> output({ 3, 3, 5, 5 });
7516
7517 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7518 memoryManager,
7519 shape0, input0, 0.5f, 0,
7520 shape1, input1, 1.0f, 0,
7521 shape0, output, 1.0f, 0);
7522}
7523
7524LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
7525 armnn::IWorkloadFactory& workloadFactory,
7526 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7527{
7528 const unsigned int shape0[] = { 1, 1, 2, 2 };
7529 const unsigned int shape1[] = { 1, 1, 1, 1 };
7530
7531 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7532 std::vector<int16_t> input1({ 2 });
7533 std::vector<int16_t> output({ 3, 4, 5, 6 });
7534
7535 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7536 memoryManager,
7537 shape0, input0, 0.5f, 0,
7538 shape1, input1, 1.0f, 0,
7539 shape0, output, 1.0f, 0);
7540}
7541
7542LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
7543 armnn::IWorkloadFactory& workloadFactory,
7544 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7545{
7546 const unsigned int shape0[] = { 1, 1, 2, 2 };
7547 const unsigned int shape1[] = { 1, 1, 2, 1 };
7548
7549 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7550 std::vector<int16_t> input1({ 2, 1 });
7551 std::vector<int16_t> output({ 8, 11, 12, 15 });
7552
7553 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7554 memoryManager,
7555 shape0, input0, 1.0f, 0,
7556 shape1, input1, 1.0f, 0,
7557 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007558}
7559
// Bilinear resize with output size equal to input size (4x4, NCHW,
// QuantisedAsymm8): the resize is a no-op, so the output must be exactly the input.
LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Output dimensions deliberately match the input dimensions (no-op resize).
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Identical quantization parameters on input and output so the
    // pass-through comparison is exact at the uint8 level.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input; // no-op resize: expect the input back unchanged

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // PostAllocationConfigure must run after Allocate() and before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7614
// Bilinear downscale of a 2x2 QuantisedAsymm8 tensor to 1x1 (NCHW). Because the
// implementation projects the top-left corner of each output texel, the single
// output element must equal input (0,0) rather than an average of the four inputs.
LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Halve both spatial dimensions: 2x2 -> 1x1.
    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // PostAllocationConfigure must run after Allocate() and before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7674
// Bilinear downscale of a square 4x4 QuantisedAsymm8 tensor to 2x2 (NCHW).
// With top-left-corner projection each output texel samples the input at even
// coordinates, so the expected output is the input's (0,0), (0,2), (2,0), (2,2).
LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Halve both spatial dimensions: 4x4 -> 2x2.
    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // PostAllocationConfigure must run after Allocate() and before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7732
// Bilinear downscale of a non-square 2x3 QuantisedAsymm8 tensor to 1x2 (NCHW),
// exercising non-integer scale factors. Inline comments show the dequantised
// values (scale 1.5, offset -1) alongside the raw uint8 data.
LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Non-integral downscale: 3x2 -> 2x1.
    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // PostAllocationConfigure must run after Allocate() and before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7788
// Bilinear *upscale* (magnification) of a 3x2 QuantisedAsymm8 tensor to 3x5
// (NCHW). Input and output deliberately use different quantization parameters,
// so the workload must requantise; inline comments give the dequantised values.
LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Width is magnified 2 -> 5; height is unchanged.
    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    // Different scale/offset from the input: forces a requantisation step.
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228, // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71 // 2.400595, 0.68896
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // PostAllocationConfigure must run after Allocate() and before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7846
// Shared driver for the 2-D Rsqrt tests: builds the input and expected tensors
// from the supplied values, runs an Rsqrt workload on the given backend, and
// returns actual vs expected for the caller's comparison framework.
// NOTE(review): the TensorInfo and vector parameters are taken by value; the
// declaration in the corresponding header must match, so the signature is left
// unchanged here.
LayerTestResult<float, 2> Rsqrt2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo inputTensorInfo,
    const armnn::TensorInfo outputTensorInfo,
    std::vector<float> inputValues,
    std::vector<float> expectedOutputValues)
{
    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, std::vector<float>(inputValues));

    LayerTestResult<float, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::RsqrtQueueDescriptor descriptor;

    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    // PostAllocationConfigure must run after Allocate() and before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
7884LayerTestResult<float, 2> Rsqrt2dTest(
7885 armnn::IWorkloadFactory& workloadFactory,
7886 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7887{
7888 const armnn::TensorShape inputShape{ 2, 2 };
7889 const armnn::TensorShape outputShape{ 2, 2 };
7890
7891 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7892 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7893
7894 std::vector<float> inputValues
7895 {
7896 1.f, 4.f,
7897 16.f, 25.f
7898 };
7899
7900 std::vector<float> expectedOutputValues
7901 {
7902 1.f, 0.5f,
7903 0.25f, 0.2f
7904 };
7905
7906 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7907 inputTensorInfo, outputTensorInfo,
7908 inputValues, expectedOutputValues);
7909}
7910
// Element-wise 1/sqrt(x) over a 3x1x2 Float32 tensor. Unlike the 2-D tests this
// runs the workload inline (Rsqrt2dTestCommon is fixed to rank 2), following
// the same allocate / copy / configure / execute sequence.
LayerTestResult<float, 3> Rsqrt3dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const armnn::TensorShape inputShape{ 3, 1, 2 };
    const armnn::TensorShape outputShape{ 3, 1, 2 };

    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    // Perfect squares, so expected outputs are exact.
    std::vector<float> inputValues
    {
        1.f, 4.f, 16.f,
        25.f, 64.f, 100.f
    };

    std::vector<float> expectedOutputValues
    {
        1.f, 0.5f, 0.25f,
        0.2f, 0.125f, 0.1f
    };

    auto inputTensor = MakeTensor<float, 3>(inputTensorInfo, std::vector<float>(inputValues));

    LayerTestResult<float, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float >(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::RsqrtQueueDescriptor descriptor;

    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    // PostAllocationConfigure must run after Allocate() and before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
7962
7963LayerTestResult<float, 2> RsqrtZeroTest(
7964 armnn::IWorkloadFactory& workloadFactory,
7965 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7966{
7967 const armnn::TensorShape inputShape{ 1, 2 };
7968 const armnn::TensorShape outputShape{ 1, 2 };
7969
7970 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7971 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7972
7973 std::vector<float> inputValues
7974 {
7975 0.f, -0.f
7976 };
7977
7978 std::vector<float> expectedOutputValues
7979 {
7980 INFINITY, -INFINITY
7981 };
7982
7983 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7984 inputTensorInfo, outputTensorInfo,
7985 inputValues, expectedOutputValues);
7986}
7987
7988LayerTestResult<float, 2> RsqrtNegativeTest(
7989 armnn::IWorkloadFactory& workloadFactory,
7990 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7991{
7992 const armnn::TensorShape inputShape{ 1, 2 };
7993 const armnn::TensorShape outputShape{ 1, 2 };
7994
7995 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7996 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7997
7998 std::vector<float> inputValues
7999 {
8000 -25.f, -16.f
8001 };
8002
8003 std::vector<float> expectedOutputValues
8004 {
8005 -NAN, -NAN
8006 };
8007
8008 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
8009 inputTensorInfo, outputTensorInfo,
8010 inputValues, expectedOutputValues);
8011}
8012
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008013LayerTestResult<float, 4> BatchNormTest(
8014 armnn::IWorkloadFactory& workloadFactory,
8015 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008016{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008017 // BatchSize: 1
8018 // Channels: 2
8019 // Height: 3
8020 // Width: 2
8021
8022 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8023 std::vector<float> inputValues
8024 {
8025 // Batch 0, Channel 0, Height (3) x Width (2)
8026 1.f, 4.f,
8027 4.f, 2.f,
8028 1.f, 6.f,
8029
8030 // Batch 0, Channel 1, Height (3) x Width (2)
8031 1.f, 1.f,
8032 4.f, 1.f,
8033 -2.f, 4.f
8034 };
8035 std::vector<float> expectedOutputValues
8036 {
8037 // Batch 0, Channel 0, Height (3) x Width (2)
8038 1.f, 4.f,
8039 4.f, 2.f,
8040 1.f, 6.f,
8041
8042 // Batch 0, Channel 1, Height (3) x Width (2)
8043 3.f, 3.f,
8044 4.f, 3.f,
8045 2.f, 4.f
8046 };
8047
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008048 return BatchNormTestImpl<armnn::DataType::Float32>(
8049 workloadFactory, memoryManager,
8050 inputOutputShape, inputValues, expectedOutputValues,
8051 0.f, 0, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008052}
8053
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008054LayerTestResult<float, 4> BatchNormNhwcTest(
8055 armnn::IWorkloadFactory& workloadFactory,
8056 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008057{
8058 // BatchSize: 1
8059 // Height: 3
8060 // Width: 2
8061 // Channels: 2
8062
8063 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8064 std::vector<float> inputValues
8065 {
8066 // Batch 0, Height 0, Width (2) x Channel (2)
8067 1.f, 1.f,
8068 4.f, 1.f,
8069
8070 // Batch 0, Height 1, Width (2) x Channel (2)
8071 4.f, 4.f,
8072 2.f, 1.f,
8073
8074 // Batch 0, Height 2, Width (2) x Channel (2)
8075 1.f, -2.f,
8076 6.f, 4.f
8077 };
8078 std::vector<float> expectedOutputValues
8079 {
8080 // Batch 0, Height 0, Width (2) x Channel (2)
8081 1.f, 3.f,
8082 4.f, 3.f,
8083
8084 // Batch 0, Height 1, Width (2) x Channel (2)
8085 4.f, 4.f,
8086 2.f, 3.f,
8087
8088 // Batch 0, Height 2, Width (2) x Channel (2)
8089 1.f, 2.f,
8090 6.f, 4.f
8091 };
8092
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008093 return BatchNormTestImpl<armnn::DataType::Float32>(
8094 workloadFactory, memoryManager,
8095 inputOutputShape, inputValues, expectedOutputValues,
8096 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008097}
8098
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008099LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8100 armnn::IWorkloadFactory& workloadFactory,
8101 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008102{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008103 // BatchSize: 1
8104 // Channels: 2
8105 // Height: 3
8106 // Width: 2
8107
8108 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8109 std::vector<float> inputValues
8110 {
8111 // Batch 0, Channel 0, Height (3) x Width (2)
8112 1.f, 4.f,
8113 4.f, 2.f,
8114 1.f, 6.f,
8115
8116 // Batch 0, Channel 1, Height (3) x Width (2)
8117 1.f, 1.f,
8118 4.f, 1.f,
8119 -2.f, 4.f
8120 };
8121 std::vector<float> expectedOutputValues
8122 {
8123 // Batch 0, Channel 0, Height (3) x Width (2)
8124 1.f, 4.f,
8125 4.f, 2.f,
8126 1.f, 6.f,
8127
8128 // Batch 0, Channel 1, Height (3) x Width (2)
8129 3.f, 3.f,
8130 4.f, 3.f,
8131 2.f, 4.f
8132 };
8133
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008134 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8135 workloadFactory, memoryManager,
8136 inputOutputShape, inputValues, expectedOutputValues,
8137 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008138}
8139
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008140LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
8141 armnn::IWorkloadFactory& workloadFactory,
8142 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008143{
8144 // BatchSize: 1
8145 // Height: 3
8146 // Width: 2
8147 // Channels: 2
8148
8149 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8150 std::vector<float> inputValues
8151 {
8152 // Batch 0, Height 0, Width (2) x Channel (2)
8153 1.f, 1.f,
8154 4.f, 1.f,
8155
8156 // Batch 0, Height 1, Width (2) x Channel (2)
8157 4.f, 4.f,
8158 2.f, 1.f,
8159
8160 // Batch 0, Height 2, Width (2) x Channel (2)
8161 1.f, -2.f,
8162 6.f, 4.f
8163 };
8164 std::vector<float> expectedOutputValues
8165 {
8166 // Batch 0, Height 0, Width (2) x Channel (2)
8167 1.f, 3.f,
8168 4.f, 3.f,
8169
8170 // Batch 0, Height 1, Width (2) x Channel (2)
8171 4.f, 4.f,
8172 2.f, 3.f,
8173
8174 // Batch 0, Height 2, Width (2) x Channel (2)
8175 1.f, 2.f,
8176 6.f, 4.f
8177 };
8178
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008179 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
8180 (workloadFactory, memoryManager,
8181 inputOutputShape, inputValues, expectedOutputValues,
8182 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008183}
8184
Nina Drozd58ef2c62019-05-16 12:09:18 +01008185LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008186 armnn::IWorkloadFactory& workloadFactory,
8187 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008188{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008189 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008190}
8191
Nina Drozd58ef2c62019-05-16 12:09:18 +01008192LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8193 armnn::IWorkloadFactory& workloadFactory,
8194 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8195{
8196 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8197}
8198
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008199LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8200 armnn::IWorkloadFactory& workloadFactory,
8201 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008202{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008203 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008204}
8205
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008206LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8207 armnn::IWorkloadFactory& workloadFactory,
8208 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008209{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008210 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008211}
8212
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008213LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8214 armnn::IWorkloadFactory& workloadFactory,
8215 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008216{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008217 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008218}
8219
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008220LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
8221 armnn::IWorkloadFactory& workloadFactory,
8222 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008223{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008224 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8225 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008226}
8227
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008228LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
8229 armnn::IWorkloadFactory& workloadFactory,
8230 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008231{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008232 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8233 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008234}
8235
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008236LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
8237 armnn::IWorkloadFactory& workloadFactory,
8238 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008239{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008240 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008241}
8242
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008243LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
8244 armnn::IWorkloadFactory& workloadFactory,
8245 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008246{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008247 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008248}
8249
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008250LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
8251 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008252 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8253 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00008254{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008255 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8256 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008257}
8258
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008259LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8260 armnn::IWorkloadFactory& workloadFactory,
8261 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008262{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008263 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008264}
8265
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008266LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
8267 armnn::IWorkloadFactory& workloadFactory,
8268 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008269{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008270 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8271 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008272}
8273
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008274LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
8275 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008276 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8277 bool useSubtensor)
8278{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008279 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8280 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008281}
8282
8283LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
8284 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008285 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008286{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008287 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008288}
8289
8290LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
8291 armnn::IWorkloadFactory& workloadFactory,
8292 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8293{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008294 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008295}
8296
8297LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
8298 armnn::IWorkloadFactory& workloadFactory,
8299 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8300{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008301 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008302}
8303
8304LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
8305 armnn::IWorkloadFactory& workloadFactory,
8306 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
8307{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008308 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8309 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00008310}
8311
8312LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
8313 armnn::IWorkloadFactory& workloadFactory,
8314 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8315{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008316 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
8317 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008318}
8319
8320LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
8321 armnn::IWorkloadFactory& workloadFactory,
8322 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8323{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008324 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
8325 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008326}
8327
8328LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
8329 armnn::IWorkloadFactory& workloadFactory,
8330 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8331{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008332 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8333 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008334}
8335
8336LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
8337 armnn::IWorkloadFactory& workloadFactory,
8338 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8339 bool useSubtensor)
8340{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008341 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8342 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00008343}
8344
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008345LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
8346 armnn::IWorkloadFactory& workloadFactory,
8347 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8348 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008349{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008350 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
8351 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008352}
8353
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008354LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
8355 armnn::IWorkloadFactory& workloadFactory,
8356 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8357 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008358{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008359 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008360 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008361}
8362
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008363LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
8364 armnn::IWorkloadFactory& workloadFactory,
8365 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8366 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008367{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008368 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
8369 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008370}
8371
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008372LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
8373 armnn::IWorkloadFactory& workloadFactory,
8374 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8375 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008376{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008377 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008378 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008379}
8380
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008381LayerTestResult<float, 4> SimpleMaxPooling2dTest(
8382 armnn::IWorkloadFactory& workloadFactory,
8383 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008384 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008385{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008386 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008387}
8388
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008389LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
8390 armnn::IWorkloadFactory& workloadFactory,
8391 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008392 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01008393{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008394 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01008395}
8396
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008397LayerTestResult<float, 4> SimpleAveragePooling2dTest(
8398 armnn::IWorkloadFactory& workloadFactory,
8399 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008400 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008401{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008402 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01008403}
8404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008405LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
8406 armnn::IWorkloadFactory& workloadFactory,
8407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008408 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01008409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008410 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008411 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008412}
8413
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008414LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
8415 armnn::IWorkloadFactory& workloadFactory,
8416 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8417 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01008418{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008419 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008420 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01008421}
8422
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008423LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
8424 armnn::IWorkloadFactory& workloadFactory,
8425 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008426{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008427 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008428}
8429
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008430LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
8431 armnn::IWorkloadFactory& workloadFactory,
8432 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008433{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008434 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8435 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008436}
8437
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008438LayerTestResult<float, 4> SimpleL2Pooling2dTest(
8439 armnn::IWorkloadFactory& workloadFactory,
8440 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008441 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008442{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008443 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008444}
8445
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008446LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
8447 armnn::IWorkloadFactory& workloadFactory,
8448 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008449 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008450{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008451 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008452}
8453
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008454LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
8455 armnn::IWorkloadFactory& workloadFactory,
8456 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008457{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008458 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008459}
8460
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008461LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
8462 armnn::IWorkloadFactory& workloadFactory,
8463 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008464{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008465 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008466}
8467
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008468LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
8469 armnn::IWorkloadFactory& workloadFactory,
8470 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008471{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008472 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008473}
8474
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008475LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
8476 armnn::IWorkloadFactory& workloadFactory,
8477 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008478{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008479 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008480}
8481
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008482LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
8483 armnn::IWorkloadFactory& workloadFactory,
8484 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008485{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008486 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008487}
8488
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008489LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
8490 armnn::IWorkloadFactory& workloadFactory,
8491 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008492{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008493 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008494}
8495
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008496LayerTestResult<float, 4> L2Pooling2dSize7Test(
8497 armnn::IWorkloadFactory& workloadFactory,
8498 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008499{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008500 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008501}
8502
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008503LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
8504 armnn::IWorkloadFactory& workloadFactory,
8505 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008506{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008507 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008508}
8509
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008510LayerTestResult<float, 4> L2Pooling2dSize9Test(
8511 armnn::IWorkloadFactory& workloadFactory,
8512 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008513{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008514 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008515}
8516
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008517LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
8518 armnn::IWorkloadFactory& workloadFactory,
8519 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008520{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008521 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008522}
8523
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008524LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
8525 armnn::IWorkloadFactory& workloadFactory,
8526 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008527{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008528 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008529}
8530
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008531LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
8532 armnn::IWorkloadFactory& workloadFactory,
8533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008534{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008535 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008536}
8537
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008538LayerTestResult<float, 4> ComparePooling2dTest(
8539 armnn::IWorkloadFactory& workloadFactory,
8540 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8541 armnn::IWorkloadFactory& refWorkloadFactory,
8542 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008543{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008544 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008545 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00008546}
8547
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008548LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
8549 armnn::IWorkloadFactory& workloadFactory,
8550 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8551 armnn::IWorkloadFactory& refWorkloadFactory,
8552 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008553{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008554 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008555 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008556}
8557
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008558LayerTestResult<float, 2> FullyConnectedLargeTest(
8559 armnn::IWorkloadFactory& workloadFactory,
8560 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8561 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00008562{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008563 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00008564}
8565
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008566LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
8567 armnn::IWorkloadFactory& workloadFactory,
8568 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008569{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008570 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008571}
8572
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008573LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
8574 armnn::IWorkloadFactory& workloadFactory,
8575 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008576{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008577 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8578 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008579}
8580
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008581LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
8582 armnn::IWorkloadFactory& workloadFactory,
8583 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008584{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008585 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008586}
8587
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008588LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
8589 armnn::IWorkloadFactory& workloadFactory,
8590 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008591{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008592 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8593 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008594}
8595
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008596LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
8597 armnn::IWorkloadFactory& workloadFactory,
8598 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008599{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008600 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008601}
8602
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008603LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
8604 armnn::IWorkloadFactory& workloadFactory,
8605 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008606{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008607 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8608 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008609}
8610
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008611LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
8612 armnn::IWorkloadFactory& workloadFactory,
8613 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008614{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008615 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
8616 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008617}
8618
8619LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008620 armnn::IWorkloadFactory& workloadFactory,
8621 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008622{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008623 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
8624 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008625}
8626
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008627LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
8628 armnn::IWorkloadFactory& workloadFactory,
8629 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008630{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008631 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008632}
8633
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008634LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
8635 armnn::IWorkloadFactory& workloadFactory,
8636 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008637{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008638 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8639 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008640}
8641
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008642LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
8643 armnn::IWorkloadFactory& workloadFactory,
8644 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008645{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008646 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008647}
8648
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008649LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
8650 armnn::IWorkloadFactory& workloadFactory,
8651 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008652{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008653 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008654}
8655
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008656LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
8657 armnn::IWorkloadFactory& workloadFactory,
8658 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008659{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008660 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008661}
8662
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008663LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
8664 armnn::IWorkloadFactory& workloadFactory,
8665 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008666{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008667 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008668}
8669
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008670LayerTestResult<float, 4> SimplePermuteFloat32Test(
8671 armnn::IWorkloadFactory& workloadFactory,
8672 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008673{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008674 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008675};
8676
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008677LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
8678 armnn::IWorkloadFactory& workloadFactory,
8679 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008680{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008681 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008682};
surmeh01bceff2f2018-03-29 16:29:27 +01008683
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008684LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
8685 armnn::IWorkloadFactory& workloadFactory,
8686 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008687{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008688 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008689};
8690
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008691LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
8692 armnn::IWorkloadFactory& workloadFactory,
8693 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008694{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008695 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008696};
8697
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008698LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
8699 armnn::IWorkloadFactory& workloadFactory,
8700 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008701{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008702 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01008703};
8704
namespace
{

// Builds a Mean workload for the given backend, runs it on `inputData`, and
// returns the produced output alongside the expected `outputData` for
// comparison by the caller.
//
// Template parameters:
//   T         - element type; uint8_t selects QuantisedAsymm8, any other type
//               is treated as Float32 (see the std::is_same dispatch below).
//   InputDim  - rank of the input tensor.
//   OutputDim - rank of the output tensor.
//
// Parameters:
//   inputShape/outputShape - arrays of InputDim/OutputDim extents.
//   axis                   - dimensions to reduce over; forwarded to
//                            MeanQueueDescriptor (empty vector = use the
//                            layer's default behaviour).
//   keepDims               - forwarded to the descriptor's m_KeepDims.
//   scale/offset           - quantisation parameters applied to BOTH input and
//                            output tensor infos.
//
// NOTE(review): memoryManager is accepted but not used anywhere in this
// helper; tensor handles come straight from the workload factory.
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int* inputShape,
    const std::vector<T>& inputData,
    const std::vector<unsigned int>& axis,
    bool keepDims,
    const unsigned int* outputShape,
    const std::vector<T>& outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Map the element type to the corresponding ArmNN data type.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    // The same quantisation parameters are used on both ends of the workload.
    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    // Pre-load the expected values; result.output is filled in after Execute().
    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the Mean operation and wire up its input/output tensors.
    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    // Allocate backing memory before copying data in — order matters here.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace
8763
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008764LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
8765 armnn::IWorkloadFactory& workloadFactory,
8766 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008767{
8768 const unsigned int inputShape[] = { 3, 2 };
8769 const unsigned int outputShape[] = { 1 };
8770
8771 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8772 std::vector<uint8_t> output({ 2 });
8773
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008774 return MeanTestHelper<uint8_t, 2, 1>(
8775 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008776}
8777
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008778LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(
8779 armnn::IWorkloadFactory& workloadFactory,
8780 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008781{
8782 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8783 const unsigned int outputShape[] = { 1, 1, 2 };
8784
8785 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8786 std::vector<uint8_t> output({ 2, 2 });
8787
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008788 return MeanTestHelper<uint8_t, 4, 3>(
8789 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008790}
8791
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008792LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(
8793 armnn::IWorkloadFactory& workloadFactory,
8794 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008795{
8796 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8797 const unsigned int outputShape[] = { 1, 1, 1, 2 };
8798
8799 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8800 std::vector<uint8_t> output({ 2, 2 });
8801
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008802 return MeanTestHelper<uint8_t, 4, 4>(
8803 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008804}
8805
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008806LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(
8807 armnn::IWorkloadFactory& workloadFactory,
8808 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008809{
8810 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8811 const unsigned int outputShape[] = { 1, 3, 1, 1 };
8812
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008813 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
narpra011e4c31d2018-09-28 11:07:51 +01008814 std::vector<uint8_t> output({ 1, 3, 5 });
8815
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008816 return MeanTestHelper<uint8_t, 4, 4>(
8817 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008818}
8819
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008820LayerTestResult<uint8_t, 1> MeanVtsUint8Test(
8821 armnn::IWorkloadFactory& workloadFactory,
8822 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008823{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008824 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01008825 const unsigned int outputShape[] = { 2 };
8826
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008827 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
8828 24 });
8829 std::vector<uint8_t> output({ 12, 13 });
narpra011e4c31d2018-09-28 11:07:51 +01008830
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008831 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, memoryManager,
8832 inputShape, input, { 0, 1 }, false, outputShape,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008833 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01008834}
8835
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008836LayerTestResult<float, 1> MeanFloatSimpleTest(
8837 armnn::IWorkloadFactory& workloadFactory,
8838 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008839{
8840 const unsigned int inputShape[] = { 3, 2 };
8841 const unsigned int outputShape[] = { 1 };
8842
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008843 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8844 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008845
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008846 return MeanTestHelper<float, 2, 1>(
8847 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008848}
8849
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008850LayerTestResult<float, 3> MeanFloatSimpleAxisTest(
8851 armnn::IWorkloadFactory& workloadFactory,
8852 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008853{
8854 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8855 const unsigned int outputShape[] = { 3, 1, 2 };
8856
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008857 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
8858 std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008859
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008860 return MeanTestHelper<float, 4, 3>(
8861 workloadFactory, memoryManager, inputShape, input, { 0 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008862}
8863
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008864LayerTestResult<float, 4> MeanFloatKeepDimsTest(
8865 armnn::IWorkloadFactory& workloadFactory,
8866 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008867{
8868 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8869 const unsigned int outputShape[] = { 1, 1, 1, 2 };
8870
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008871 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8872 std::vector<float> output({ 2.0f, 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008873
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008874 return MeanTestHelper<float, 4, 4>(
8875 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008876}
8877
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008878LayerTestResult<float, 4> MeanFloatMultipleDimsTest(
8879 armnn::IWorkloadFactory& workloadFactory,
8880 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008881{
8882 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8883 const unsigned int outputShape[] = { 1, 3, 1, 1 };
8884
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008885 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
8886 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008887
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008888 return MeanTestHelper<float, 4, 4>(
8889 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008890}
8891
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008892LayerTestResult<float, 1> MeanVtsFloat1Test(
8893 armnn::IWorkloadFactory& workloadFactory,
8894 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008895{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008896 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01008897 const unsigned int outputShape[] = { 2 };
8898
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008899 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8900 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8901 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008902
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008903 return MeanTestHelper<float, 3, 1>(
8904 workloadFactory, memoryManager, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008905}
8906
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008907LayerTestResult<float, 3> MeanVtsFloat2Test(
8908 armnn::IWorkloadFactory& workloadFactory,
8909 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008910{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008911 const unsigned int inputShape[] = { 4, 3, 2 };
8912 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01008913
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008914 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8915 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8916 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008917
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008918 return MeanTestHelper<float, 3, 3>(
8919 workloadFactory, memoryManager, inputShape, input, { 0, 2 }, true, outputShape, output);
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008920}
8921
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008922LayerTestResult<float, 3> MeanVtsFloat3Test(
8923 armnn::IWorkloadFactory& workloadFactory,
8924 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008925{
8926 const unsigned int inputShape[] = { 1, 2, 2, 1 };
8927 const unsigned int outputShape[] = { 1, 2, 1 };
8928
8929 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
8930 std::vector<float> output({ 1.5f, 3.5f });
8931
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008932 return MeanTestHelper<float, 4, 3>(
8933 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008934}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01008935
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Manually chains two workloads: a 1x1 max pool with stride 2x2 over a 3x3 input,
    // followed by an element-wise addition of the pooled result with a second tensor.
    // The addition workload reads the pooling output handle directly as its first input.
    // NOTE(review): memoryManager is unused here; all handles come straight from
    // workloadFactory — confirm whether it should be threaded through.

    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Host-side scratch buffer shaped like the pooling output, used for the
    // round-trip copy below.
    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                            {12, 16,
                                                             24, 28,
                                                            });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): the next two lines read the pooling output handle BEFORE the
    // pooling workload has executed, then write the same bytes straight back — an
    // apparent no-op round trip, since Execute() below overwrites the handle anyway.
    // Looks like leftover scaffolding; confirm intent before removing.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Run the pooling workload first so its output is in place, then the addition.
    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009040
// The four wrappers below forward to the templated SpaceToBatchNd test
// implementations (defined in SpaceToBatchNdTestImpl) instantiated for Float32
// data with the implementation's default data layout.
LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9068
// Quantised (asymmetric uint8) counterparts of the Float32 SpaceToBatchNd
// wrappers above; same templated implementations, different data type.
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9096
// NHWC-layout variants of the Float32 SpaceToBatchNd wrappers; these call the
// dedicated *NHWC templated implementations.
LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9124
// NHWC-layout, quantised-uint8 variants of the SpaceToBatchNd wrappers.
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009152
namespace {

// Drives a single BatchToSpaceNd workload end to end: builds input/expected
// tensors, creates the workload via the factory, executes it, and returns the
// actual vs expected result pair for comparison by the caller.
//
// T          element type; uint8_t selects QuantisedAsymm8, everything else Float32.
// InputDim   rank of the input tensor (must match inputShape's length).
// OutputDim  rank of the output tensor.
// scale/offset  quantisation parameters applied to BOTH input and output infos.
//
// NOTE(review): memoryManager is accepted but never used — handles are created
// straight from workloadFactory; confirm whether it should be passed through.
template<typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout& dataLayout,
    const unsigned int *inputShape,
    const std::vector<T> &inputData,
    const std::vector<unsigned int> &blockShape,
    const std::vector<std::pair<unsigned int, unsigned int>> &crops,
    const unsigned int *outputShape,
    const std::vector<T> &outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Only two data types are exercised by the callers in this file.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchToSpaceNdQueueDescriptor data;
    data.m_Parameters.m_DataLayout = dataLayout;
    data.m_Parameters.m_BlockShape = blockShape;
    data.m_Parameters.m_Crops = crops;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->PostAllocationConfigure();
    workload->Execute();

    // NOTE(review): the 4-index access assumes OutputDim == 4, which holds for
    // every caller in this file — revisit if a lower-rank output is ever added.
    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

} // anonymous namespace
9212
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009213LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
9214 armnn::IWorkloadFactory& workloadFactory,
9215 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009216{
9217 const unsigned int inputShape[] = {4, 2, 2, 1};
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009218 const unsigned int outputShape[] = {1, 4, 4, 1};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009219
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009220 std::vector<float> input({
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009221 // Batch 0, Height 0, Width (2) x Channel (1)
9222 1.0f, 3.0f,
9223 // Batch 0, Height 1, Width (2) x Channel (1)
9224 9.0f, 11.0f,
9225
9226
9227 // Batch 1, Height 0, Width (2) x Channel (1)
9228 2.0f, 4.0f,
9229 // Batch 1, Height 1, Width (2) x Channel (1)
9230 10.0f, 12.0f,
9231
9232
9233 // Batch 2, Height 0, Width (2) x Channel (1)
9234 5.0f, 7.0f,
9235 // Batch 2, Height 1, Width (2) x Channel (1)
9236 13.0f, 15.0f,
9237
9238 // Batch 3, Height 0, Width (2) x Channel (3)
9239 6.0f, 8.0f,
9240 // Batch 3, Height 1, Width (2) x Channel (1)
9241 14.0f, 16.0f
9242 });
9243
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009244 std::vector<float> expectedOutput({
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009245 1.0f, 2.0f, 3.0f, 4.0f,
9246 5.0f, 6.0f, 7.0f, 8.0f,
9247 9.0f, 10.0f, 11.0f, 12.0f,
9248 13.0f, 14.0f, 15.0f, 16.0f
9249 });
9250
9251 std::vector<unsigned int> blockShape {2, 2};
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009252 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009253
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009254 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9255 armnn::DataLayout::NHWC, inputShape, input, blockShape,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009256 crops, outputShape, expectedOutput);
9257}
9258
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009259LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
9260 armnn::IWorkloadFactory& workloadFactory,
9261 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009262{
9263 const unsigned int inputShape[] = {4, 1, 1, 1};
9264 const unsigned int outputShape[] = {1, 2, 2, 1};
9265
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009266 std::vector<float> input({
9267 // Batch 0, Height 0, Width (2) x Channel (1)
9268 1.0f, 2.0f, 3.0f, 4.0f
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009269 });
9270
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009271 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009272
9273 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009274 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009275
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009276 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9277 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9278 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009279}
9280
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009281LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
9282 armnn::IWorkloadFactory& workloadFactory,
9283 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009284{
9285 const unsigned int inputShape[] = {4, 1, 1, 3};
9286 const unsigned int outputShape[] = {1, 2, 2, 3};
9287
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009288 std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009289
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009290 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009291
9292 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009293 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009294
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009295 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9296 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9297 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009298}
9299
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009300LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test4(
9301 armnn::IWorkloadFactory& workloadFactory,
9302 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9303{
9304 const unsigned int inputShape[] = {8, 1, 3, 1};
9305 const unsigned int outputShape[] = {2, 2, 4, 1};
9306
9307 std::vector<float> input({
9308 0.0f, 1.0f, 3.0f,
9309 0.0f, 9.0f, 11.0f,
9310 0.0f, 2.0f, 4.0f,
9311 0.0f, 10.0f, 12.0f,
9312 0.0f, 5.0f, 7.0f,
9313 0.0f, 13.0f, 15.0f,
9314 0.0f, 6.0f, 8.0f,
9315 0.0f, 14.0f, 16.0f
9316 });
9317
9318 std::vector<float> expectedOutput({
9319 1.0f, 2.0f, 3.0f, 4.0f,
9320 5.0f, 6.0f, 7.0f, 8.0f,
9321 9.0f, 10.0f, 11.0f, 12.0f,
9322 13.0f, 14.0f, 15.0f, 16.0f
9323 });
9324
9325 std::vector<unsigned int> blockShape({2, 2});
9326 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};
9327
9328 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9329 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9330 crops, outputShape, expectedOutput);
9331}
9332
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009333LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
9334 armnn::IWorkloadFactory &workloadFactory,
9335 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009336{
9337 const unsigned int inputShape[] = {4, 3, 1, 1};
9338 const unsigned int outputShape[] = {1, 3, 2, 2};
9339
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009340 std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009341
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009342 std::vector<float> expectedOutput({
9343 // Batch 0, Channel 0, Height (2) x Width (2)
9344 1.0f, 4.0f,
9345 7.0f, 10.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009346
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009347 // Batch 0, Channel 1, Height (2) x Width (2)
9348 2.0f, 5.0f,
9349 8.0f, 11.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009350
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009351 // Batch 0, Channel 2, Height (2) x Width (2)
9352 3.0f, 6.0f,
9353 9.0f, 12.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009354 });
9355
9356 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009357 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009358
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009359 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9360 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9361 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009362}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009363
Mike Kelly831faed2018-11-28 11:52:08 +00009364LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test2(
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009365 armnn::IWorkloadFactory& workloadFactory,
9366 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mike Kelly831faed2018-11-28 11:52:08 +00009367{
9368 const unsigned int inputShape[] = {4, 1, 1, 1};
9369 const unsigned int outputShape[] = {1, 1, 2, 2};
9370
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009371 std::vector<float> input({
9372 // Batch 0, Height 0, Width (2) x Channel (1)
9373 1.0f, 2.0f, 3.0f, 4.0f
9374 });
Mike Kelly831faed2018-11-28 11:52:08 +00009375
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009376 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
Mike Kelly831faed2018-11-28 11:52:08 +00009377
9378 std::vector<unsigned int> blockShape({2, 2});
9379 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9380
9381 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9382 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9383 crops, outputShape, expectedOutput);
9384}
9385
9386LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test3(
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009387 armnn::IWorkloadFactory& workloadFactory,
9388 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mike Kelly831faed2018-11-28 11:52:08 +00009389{
9390 const unsigned int inputShape[] = {4, 3, 1, 1};
9391 const unsigned int outputShape[] = {1, 3, 2, 2};
9392
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009393 std::vector<float> input({1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f});
Mike Kelly831faed2018-11-28 11:52:08 +00009394
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009395 std::vector<float> expectedOutput({
9396 // Batch 0, Channel 0, Height (2) x Width (2)
9397 1.0f, 7.0f,
9398 2.0f, 8.0f,
Mike Kelly831faed2018-11-28 11:52:08 +00009399
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009400 // Batch 0, Channel 1, Height (2) x Width (2)
9401 3.0f, 9.0f,
9402 4.0f, 10.0f,
Mike Kelly831faed2018-11-28 11:52:08 +00009403
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009404 // Batch 0, Channel 2, Height (2) x Width (2)
9405 5.0f, 11.0f,
9406 6.0f, 12.0f,
9407 });
Mike Kelly831faed2018-11-28 11:52:08 +00009408
9409 std::vector<unsigned int> blockShape({2, 2});
9410 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9411
9412 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9413 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9414 crops, outputShape, expectedOutput);
9415}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009416
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009417LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
9418 armnn::IWorkloadFactory& workloadFactory,
9419 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009420{
9421 const unsigned int inputShape[] = {4, 2, 2, 1};
9422 const unsigned int outputShape[] = {1, 4, 4, 1};
9423
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009424 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
9425 std::vector<uint8_t> expectedOutput({1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009426
9427 std::vector<unsigned int> blockShape({2, 2});
9428 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9429
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00009430 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
9431 input, blockShape, crops, outputShape, expectedOutput);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009432}
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009433
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009434LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
9435 armnn::IWorkloadFactory& workloadFactory,
9436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9437{
9438 const unsigned int inputShape[] = {4, 1, 1, 1};
9439 const unsigned int outputShape[] = {1, 2, 2, 1};
9440
9441 std::vector<uint8_t> input({
9442 // Batch 0, Height 0, Width (2) x Channel (1)
9443 1, 2, 3, 4
9444 });
9445
9446 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
9447
9448 std::vector<unsigned int> blockShape({2, 2});
9449 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9450
9451 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9452 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9453 crops, outputShape, expectedOutput);
9454}
9455
9456LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest3(
9457 armnn::IWorkloadFactory& workloadFactory,
9458 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9459{
9460 const unsigned int inputShape[] = {4, 1, 1, 3};
9461 const unsigned int outputShape[] = {1, 2, 2, 3};
9462
9463 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9464
9465 std::vector<uint8_t> expectedOutput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9466
9467 std::vector<unsigned int> blockShape({2, 2});
9468 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9469
9470 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9471 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9472 crops, outputShape, expectedOutput);
9473}
9474
9475
9476LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest1(
9477 armnn::IWorkloadFactory &workloadFactory,
9478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9479{
9480 const unsigned int inputShape[] = {4, 3, 1, 1};
9481 const unsigned int outputShape[] = {1, 3, 2, 2};
9482
9483 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9484
9485 std::vector<uint8_t> expectedOutput({
9486 // Batch 0, Channel 0, Height (2) x Width (2)
9487 1, 4,
9488 7, 10,
9489
9490 // Batch 0, Channel 1, Height (2) x Width (2)
9491 2, 5,
9492 8, 11,
9493
9494 // Batch 0, Channel 2, Height (2) x Width (2)
9495 3, 6,
9496 9, 12,
9497 });
9498
9499 std::vector<unsigned int> blockShape({2, 2});
9500 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9501
9502 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9503 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9504 crops, outputShape, expectedOutput);
9505}
9506
9507LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest2(
9508 armnn::IWorkloadFactory& workloadFactory,
9509 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9510{
9511 const unsigned int inputShape[] = {4, 1, 1, 1};
9512 const unsigned int outputShape[] = {1, 1, 2, 2};
9513
9514 std::vector<uint8_t> input({
9515 // Batch 0, Height 0, Width (2) x Channel (1)
9516 1, 2, 3, 4
9517 });
9518
9519 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
9520
9521 std::vector<unsigned int> blockShape({2, 2});
9522 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9523
9524 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9525 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9526 crops, outputShape, expectedOutput);
9527}
9528
9529LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest3(
9530 armnn::IWorkloadFactory& workloadFactory,
9531 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9532{
9533 const unsigned int inputShape[] = {4, 3, 1, 1};
9534 const unsigned int outputShape[] = {1, 3, 2, 2};
9535
9536 std::vector<uint8_t> input({1, 3, 5, 7, 9, 11, 2, 4, 6, 8, 10, 12});
9537
9538 std::vector<uint8_t> expectedOutput({
9539 // Batch 0, Channel 0, Height (2) x Width (2)
9540 1, 7,
9541 2, 8,
9542
9543 // Batch 0, Channel 1, Height (2) x Width (2)
9544 3, 9,
9545 4, 10,
9546
9547 // Batch 0, Channel 2, Height (2) x Width (2)
9548 5, 11,
9549 6, 12,
9550 });
9551
9552 std::vector<unsigned int> blockShape({2, 2});
9553 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9554
9555 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9556 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9557 crops, outputShape, expectedOutput);
9558}
9559
9560LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest4(
9561 armnn::IWorkloadFactory& workloadFactory,
9562 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9563{
9564 const unsigned int inputShape[] = {8, 1, 1, 3};
9565 const unsigned int outputShape[] = {2, 1, 2, 4};
9566
9567 std::vector<uint8_t> input({
9568 0, 1, 3, 0, 9, 11,
9569 0, 2, 4, 0, 10, 12,
9570 0, 5, 7, 0, 13, 15,
9571 0, 6, 8, 0, 14, 16
9572 });
9573
9574 std::vector<uint8_t> expectedOutput({
9575 1, 2, 3, 4,
9576 5, 6, 7, 8,
9577 9, 10, 11, 12,
9578 13, 14, 15, 16
9579 });
9580
9581 std::vector<unsigned int> blockShape({2, 2});
9582 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};
9583
9584 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9585 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9586 crops, outputShape, expectedOutput);
9587}
9588
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009589LayerTestResult<float, 4> StridedSlice4DFloat32Test(
9590 armnn::IWorkloadFactory& workloadFactory,
9591 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9592{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009593 return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009594}
9595
9596LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
9597 armnn::IWorkloadFactory& workloadFactory,
9598 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9599{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009600 return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009601}
9602
9603LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
9604 armnn::IWorkloadFactory& workloadFactory,
9605 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9606{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009607 return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009608}
9609
9610LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
9611 armnn::IWorkloadFactory& workloadFactory,
9612 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9613{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009614 return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009615}
9616
9617LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
9618 armnn::IWorkloadFactory& workloadFactory,
9619 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9620{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009621 return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009622}
9623
9624LayerTestResult<float, 3> StridedSlice3DFloat32Test(
9625 armnn::IWorkloadFactory& workloadFactory,
9626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9627{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009628 return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009629}
9630
9631LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
9632 armnn::IWorkloadFactory& workloadFactory,
9633 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9634{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009635 return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009636}
9637
9638LayerTestResult<float, 2> StridedSlice2DFloat32Test(
9639 armnn::IWorkloadFactory& workloadFactory,
9640 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9641{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009642 return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009643}
9644
9645LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
9646 armnn::IWorkloadFactory& workloadFactory,
9647 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9648{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009649 return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009650}
9651
9652LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
9653 armnn::IWorkloadFactory& workloadFactory,
9654 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9655{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009656 return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009657}
9658
9659LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
9660 armnn::IWorkloadFactory& workloadFactory,
9661 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9662{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009663 return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009664}
9665
9666LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
9667 armnn::IWorkloadFactory& workloadFactory,
9668 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9669{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009670 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009671}
9672
9673LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
9674 armnn::IWorkloadFactory& workloadFactory,
9675 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9676{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009677 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009678}
9679
9680LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
9681 armnn::IWorkloadFactory& workloadFactory,
9682 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9683{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009684 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009685}
9686
9687LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
9688 armnn::IWorkloadFactory& workloadFactory,
9689 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9690{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009691 return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009692}
9693
9694LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
9695 armnn::IWorkloadFactory& workloadFactory,
9696 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9697{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009698 return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009699}
9700
9701LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
9702 armnn::IWorkloadFactory& workloadFactory,
9703 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9704{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009705 return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009706}
9707
9708LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
9709 armnn::IWorkloadFactory& workloadFactory,
9710 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9711{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009712 return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009713}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009714
//
// Debug layer test wrappers: instantiate the templated DebugNDTest
// implementation for each tensor rank (1D-4D) and data type
// (Float32 / QuantisedAsymm8).
//

LayerTestResult<float, 4> Debug4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> Debug3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> Debug2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 1> Debug1DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> Debug4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> Debug3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> Debug2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Debug1DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Matteo Martincigh49124022019-01-11 13:25:59 +00009770
narpra014951d842019-01-18 16:53:53 +00009771LayerTestResult<float, 1> Gather1DParamsFloatTest(
9772 armnn::IWorkloadFactory& workloadFactory,
9773 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9774{
9775 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9776}
9777
9778LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
9779 armnn::IWorkloadFactory& workloadFactory,
9780 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9781{
9782 return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9783}
9784
9785LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
9786 armnn::IWorkloadFactory& workloadFactory,
9787 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9788{
9789 return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9790}
9791
9792LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
9793 armnn::IWorkloadFactory& workloadFactory,
9794 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9795{
9796 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9797}
9798
9799LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
9800 armnn::IWorkloadFactory& workloadFactory,
9801 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9802{
9803 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9804}
9805
9806LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
9807 armnn::IWorkloadFactory& workloadFactory,
9808 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9809{
9810 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
9811 workloadFactory, memoryManager);
Matteo Martincigh3d6898c2019-01-15 16:11:44 +00009812}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009813
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009814LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009815 armnn::IWorkloadFactory& workloadFactory,
9816 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9817{
9818 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9819}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009820
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009821LayerTestResult<float, 4> DequantizeOffsetUint8Test(
9822 armnn::IWorkloadFactory& workloadFactory,
9823 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9824{
9825 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9826}
9827
9828LayerTestResult<float, 4> DequantizeSimpleInt16Test(
9829 armnn::IWorkloadFactory& workloadFactory,
9830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9831{
9832 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9833}
9834
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009835LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
9836 armnn::IWorkloadFactory& workloadFactory,
9837 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9838{
9839 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9840}
9841
9842LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
9843 armnn::IWorkloadFactory& workloadFactory,
9844 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9845{
9846 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9847}
9848
9849LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
9850 armnn::IWorkloadFactory& workloadFactory,
9851 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9852{
9853 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9854}